| id (string, length 1-8) | text (string, length 72-9.81M) | addition_count (int64, 0-10k) | commit_subject (string, length 0-3.7k) | deletion_count (int64, 0-8.43k) | file_extension (string, length 0-32) | lang (string, length 1-94) | license (string, 10 classes) | repo_name (string, length 9-59) |
---|---|---|---|---|---|---|---|---|
10071050 | <NME> helper_spec.rb
<BEF> # frozen_string_literal: true
require "spec_helper"
# TODO change some of these tests to use Rack::Test
describe Split::Helper do
include Split::Helper
let(:experiment) {
Split::ExperimentCatalog.find_or_create("link_color", "blue", "red")
}
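  # NB: `let` is lazy, so this experiment is only created in Redis once an
  # example actually references `experiment`.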
describe "ab_test" do
it "should not raise an error when passed strings for alternatives" do
expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error
end
it "should not raise an error when passed an array for alternatives" do
expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error
end
it "should raise the appropriate error when passed integers for alternatives" do
expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError)
end
it "should raise the appropriate error when passed symbols for alternatives" do
expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError)
end
it "should not raise error when passed an array for goals" do
expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error
end
it "should not raise error when passed just one goal" do
expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error
end
it "raises an appropriate error when processing combined expirements" do
Split.configuration.experiments = {
combined_exp_1: {
alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ],
metric: :my_metric,
combined_experiments: [:combined_exp_1_sub_1]
}
}
Split::ExperimentCatalog.find_or_create("combined_exp_1")
expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError)
end
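    # Combined experiments cannot be resolved directly by ab_test, hence the
    # InvalidExperimentsFormatError asserted above.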
it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do
ab_test("link_color", "blue", "red")
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should increment the participation counter after assignment to a new user" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)
end
it "should not increment the counter for an experiment that the user is not participating in" do
ab_test("link_color", "blue", "red")
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
# User shouldn't participate in this second experiment
ab_test("button_size", "small", "big")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an not started experiment" do
expect(Split.configuration).to receive(:start_manually).and_return(true)
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should return the given alternative for an existing user" do
expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red")
end
it "should always return the winner if one is present" do
experiment.winner = "orange"
expect(ab_test("link_color", "blue", "red")).to eq("orange")
end
it "should allow the alternative to be forced by passing it in the params" do
# ?ab_test[link_color]=blue
@params = { "ab_test" => { "link_color" => "blue" } }
      alternative = ab_test("link_color", "blue", "red")
      expect(alternative).to eq("blue")
    end
it "should not allow an arbitrary alternative" do
@params = { "ab_test" => { "link_color" => "pink" } }
alternative = ab_test("link_color", "blue")
expect(alternative).to eq("blue")
end
it "should not store the split when a param forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do
@params = { "SPLIT_DISABLE" => "true" }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
alternative = ab_test("link_color", "red", "blue")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1)
expect(alternative).to eq("red")
end
it "should not store the split when Split generically disabled" do
@params = { "SPLIT_DISABLE" => "true" }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
context "when store_override is set" do
before { Split.configuration.store_override = true }
it "should store the forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).to receive(:[]=).with("link_color", "blue")
ab_test("link_color", "blue", "red")
end
end
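    # Contrast with "should not store the split when a param forces the
    # alternative" above: the override is persisted only when store_override is on.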
context "when on_trial_choose is set" do
before { Split.configuration.on_trial_choose = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_test("link_color", "blue", "red")
end
end
it "should allow passing a block" do
alt = ab_test("link_color", "blue", "red")
ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" }
expect(ret).to eq("shared/#{alt}")
end
it "should allow the share of visitors see an alternative to be specified" do
ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should allow alternative weighting interface as a single hash" do
ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"])
expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])
end
it "should only let a user participate in one experiment at a time" do
link_color = ab_test("link_color", "blue", "red")
ab_test("button_size", "small", "big")
expect(ab_user["link_color"]).to eq(link_color)
big = Split::Alternative.new("big", "button_size")
expect(big.participant_count).to eq(0)
small = Split::Alternative.new("small", "button_size")
expect(small.participant_count).to eq(0)
end
it "should let a user participate in many experiment with allow_multiple_experiments option" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
link_color = ab_test("link_color", "blue", "red")
button_size = ab_test("button_size", "small", "big")
expect(ab_user["link_color"]).to eq(link_color)
expect(ab_user["button_size"]).to eq(button_size)
button_size_alt = Split::Alternative.new(button_size, "button_size")
expect(button_size_alt.participant_count).to eq(1)
end
context "with allow_multiple_experiments = 'control'" do
it "should let a user participate in many experiment with one non-'control' alternative" do
Split.configure do |config|
config.allow_multiple_experiments = "control"
end
groups = 100.times.map do |n|
ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n })
end
experiments = ab_user.active_experiments
expect(experiments.size).to be > 1
count_control = experiments.values.count { |g| g == "control" }
expect(count_control).to eq(experiments.size - 1)
count_alts = groups.count { |g| g != "control" }
expect(count_alts).to eq(1)
end
context "when user already has experiment" do
let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) }
before do
Split.configure do |config|
config.allow_multiple_experiments = "control"
end
Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save
Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save
end
it "should restore previously selected alternative" do
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "should select the correct alternatives after experiment resets" do
experiment = Split::ExperimentCatalog.find(:test_0)
experiment.reset
mock_user[experiment.key] = "test-alt"
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "lets override existing choice" do
pending "this requires user store reset on first call not depending on whelther it is current trial"
@params = { "ab_test" => { "test_1" => "test-alt" } }
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control"
expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
end
end
end
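    # NB: in the example below, assigning to `ab_user` creates a local variable
    # that shadows the ab_user helper method for the rest of the example.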
it "should not over-write a finished key when an experiment is on a later version" do
experiment.increment_version
ab_user = { experiment.key => "blue", experiment.finished_key => true }
finished_session = ab_user.dup
ab_test("link_color", "blue", "red")
expect(ab_user).to eq(finished_session)
end
end
describe "metadata" do
context "is defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: { "one" => "Meta1", "two" => "Meta2" }
}
}
end
it "should be passed to helper block" do
@params = { "ab_test" => { "my_experiment" => "two" } }
expect(ab_test("my_experiment")).to eq "two"
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq("Meta2")
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment")).to eq "one"
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq("Meta1")
end
end
context "is not defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: nil
}
}
end
it "should be passed to helper block" do
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq({})
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq({})
end
end
end
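  # When given a block, ab_test yields (alternative, metadata); metadata falls
  # back to an empty hash when the experiment defines none, as asserted above.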
describe "ab_finished" do
context "for an experiment that the user participates in" do
before(:each) do
@experiment_name = "link_color"
@alternatives = ["blue", "red"]
@experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
@alternative_name = ab_test(@experiment_name, *@alternatives)
@previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
end
it "should increment the counter for the completed alternative" do
ab_finished(@experiment_name)
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should set experiment's finished key if reset is false" do
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should not increment the counter if reset is false and the experiment has been already finished" do
2.times { ab_finished(@experiment_name, { reset: false }) }
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new(a, "button_size").completed_count }
end
it "should clear out the user's participation from their session" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should not clear out the users session if reset is false" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should reset the users session when experiment is not versioned" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should reset the users session when experiment is versioned" do
@experiment.increment_version
@alternative_name = ab_test(@experiment_name, *@alternatives)
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
context "when on_trial_complete is set" do
before { Split.configuration.on_trial_complete = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_finished(@experiment_name)
end
it "should not call the method without alternative" do
ab_user[@experiment.key] = nil
expect(self).not_to receive(:some_method)
ab_finished(@experiment_name)
end
end
end
context "for an experiment that the user is excluded from" do
before do
alternative = ab_test("link_color", "blue", "red")
expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1)
alternative = ab_test("button_size", "small", "big")
expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0)
end
it "should not increment the completed counter" do
      # So, the user should be participating in the link_color experiment and
      # receiving the control for button_size. As the user is not participating in
# the button size experiment, finishing it should not increase the
# completion count for that alternative.
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new("small", "button_size").completed_count }
end
end
context "for an experiment that the user does not participate in" do
before do
Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt")
end
it "should not raise an exception" do
expect { ab_finished(:not_started_experiment) }.not_to raise_exception
end
it "should not change the user state when reset is false" do
expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])
end
it "should not change the user state when reset is true" do
expect(self).not_to receive(:reset!)
ab_finished(:not_started_experiment)
end
it "should not increment the completed counter" do
ab_finished(:not_started_experiment)
expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0)
expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0)
end
end
end
context "finished with config" do
it "passes reset option" do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
}
}
alternative = ab_test(:my_experiment)
experiment = Split::ExperimentCatalog.find :my_experiment
ab_finished :my_experiment
expect(ab_user[experiment.key]).to eq(alternative)
expect(ab_user[experiment.finished_key]).to eq(true)
end
end
context "finished with metric name" do
before { Split.configuration.experiments = {} }
before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }
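    # Test helper: replaces Split::Alternative.new for the experiment's chosen
    # alternative with a double, so the examples below can assert whether
    # ab_finished increments its completion count.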
def should_finish_experiment(experiment_name, should_finish = true)
alts = Split.configuration.experiments[experiment_name][:alternatives]
experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)
alt_name = ab_user[experiment.key] = alts.first
alt = double("alternative")
expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)
expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)
if should_finish
expect(alt).to receive(:increment_completion).at_most(1).times
else
expect(alt).not_to receive(:increment_completion)
end
end
it "completes the test" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
metric: :my_metric
}
should_finish_experiment :my_experiment
ab_finished :my_metric
end
it "completes all relevant tests" do
Split.configuration.experiments = {
exp_1: {
alternatives: [ "1-1", "1-2" ],
metric: :my_metric
},
exp_2: {
alternatives: [ "2-1", "2-2" ],
metric: :another_metric
},
exp_3: {
alternatives: [ "3-1", "3-2" ],
metric: :my_metric
},
}
should_finish_experiment :exp_1
should_finish_experiment :exp_2, false
should_finish_experiment :exp_3
ab_finished :my_metric
end
it "passes reset option" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
resettable: false,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
it "passes through options" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric, reset: false
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
end
describe "conversions" do
it "should return a conversion rate for an alternative" do
alternative_name = ab_test("link_color", "blue", "red")
      previous_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(previous_conversion_rate).to eq(0.0)
      ab_finished("link_color")
      new_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(new_conversion_rate).to eq(1.0)
end
end
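  # conversion_rate is completed_count / participant_count, so a single
  # participant who converts yields 1 / 1 = 1.0.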
describe "active experiments" do
it "should show an active test" do
alternative = ab_test("def", "4", "5", "6")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show a finished test" do
alternative = ab_test("def", "4", "5", "6")
ab_finished("def", { reset: false })
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show an active test when an experiment is on a later version" do
experiment.reset
expect(experiment.version).to eq(1)
ab_test("link_color", "blue", "red")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "link_color"
end
it "should show versioned tests properly" do
10.times { experiment.reset }
alternative = ab_test(experiment.name, "blue", "red")
ab_finished(experiment.name, reset: false)
expect(experiment.version).to eq(10)
expect(active_experiments.count).to eq 1
expect(active_experiments).to eq({ "link_color" => alternative })
end
it "should show multiple tests" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
alternative = ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 2
expect(active_experiments["def"]).to eq alternative
expect(active_experiments["ghi"]).to eq another_alternative
end
it "should not show tests with winners" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6")
e.winner = "4"
ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "ghi"
expect(active_experiments.first[1]).to eq another_alternative
end
end
describe "when user is a robot" do
before(:each) do
@request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)")
end
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not create a experiment" do
ab_test("link_color", "blue", "red")
expect(Split::Experiment.new("link_color")).to be_a_new_record
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when providing custom ignore logic" do
context "using a proc to configure custom logic" do
before(:each) do
Split.configure do |c|
c.ignore_filter = proc { |request| true } # ignore everything
end
end
it "ignores the ab_test" do
ab_test("link_color", "blue", "red")
red_count = Split::Alternative.new("red", "link_color").participant_count
blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((red_count + blue_count)).to be(0)
end
end
end
shared_examples_for "a disabled test" do
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
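  # These shared examples are reused below by the ignored-IP and preview
  # contexts via `it_behaves_like "a disabled test"`.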
describe "when ip address is ignored" do
context "individually" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.130")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it_behaves_like "a disabled test"
end
context "for a range" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.129")
Split.configure do |c|
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "using both a range and a specific value" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.128")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "when ignored other address" do
before do
@request = OpenStruct.new(ip: "1.1.1.1")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it "works as usual" do
alternative_name = ab_test("link_color", "red", "blue")
expect {
ab_finished("link_color")
}.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1)
end
end
end
describe "when user is previewing" do
before(:each) do
@request = OpenStruct.new(headers: { "x-purpose" => "preview" })
end
it_behaves_like "a disabled test"
end
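  # Versioned experiments store their session entry under "<name>:<version>"
  # (e.g. "link_color:1"), which the expectations below depend on.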
describe "versioned experiments" do
it "should use version zero if no version is present" do
alternative_name = ab_test("link_color", "blue", "red")
expect(experiment.version).to eq(0)
expect(ab_user["link_color"]).to eq(alternative_name)
end
it "should save the version of the experiment to the session" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
end
it "should load the experiment even if the version is not 0" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
return_alternative_name = ab_test("link_color", "blue", "red")
expect(return_alternative_name).to eq(alternative_name)
end
it "should reset the session of a user on an older version of the experiment" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
expect(new_alternative.participant_count).to eq(1)
end
it "should cleanup old versions of experiments from the session" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
end
it "should only count completion of users on the current version" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
Split::Alternative.new(alternative_name, "link_color")
experiment.reset
expect(experiment.version).to eq(1)
ab_finished("link_color")
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.completed_count).to eq(0)
end
end
context "when redis is not available" do
before(:each) do
expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)
end
context "and db_failover config option is turned off" do
before(:each) do
Split.configure do |config|
config.db_failover = false
end
end
describe "ab_test" do
it "should raise an exception" do
expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "finished" do
it "should raise an exception" do
expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "disable split testing" do
before(:each) do
Split.configure do |config|
config.enabled = false
end
end
it "should not attempt to connect to redis" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should return control variable" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect { ab_finished("link_color") }.not_to raise_error
end
end
end
context "and db_failover config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover = true
end
end
describe "ab_test" do
it "should not raise an exception" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_test("link_color", "blue", "red")
end
it "should always use first alternative" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/blue")
end
context "and db_failover_allow_parameter_override config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover_allow_parameter_override = true
end
end
context "and given an override parameter" do
it "should use given override instead of the first alternative" do
@params = { "ab_test" => { "link_color" => "red" } }
expect(ab_test("link_color", "blue", "red")).to eq("red")
expect(ab_test("link_color", "blue", "red", "green")).to eq("red")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/red")
end
end
end
context "and preloaded config given" do
before do
Split.configuration.experiments[:link_color] = {
alternatives: [ "blue", "red" ],
}
end
it "uses first alternative" do
expect(ab_test(:link_color)).to eq("blue")
end
end
end
describe "finished" do
it "should not raise an exception" do
expect { ab_finished("link_color") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_finished("link_color")
end
end
end
end
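  # Failover summary for the contexts above: with db_failover enabled, ab_test
  # falls back to the first (control) alternative and ab_finished does not raise.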
context "with preloaded config" do
before { Split.configuration.experiments = {} }
it "pulls options from config file" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
ab_test :my_experiment
expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
end
it "can be called multiple times" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
5.times { ab_test :my_experiment }
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(experiment.goals).to eq([ "goal1", "goal2" ])
expect(experiment.participant_count).to eq(1)
end
it "accepts multiple goals" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: [ "goal1", "goal2", "goal3" ]
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
end
it "allow specifying goals to be optional" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ]
}
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([])
end
it "accepts multiple alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "second_opt", "third_opt" ],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
end
it "accepts probability on alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 67 },
{ name: "second_opt", percent: 10 },
{ name: "third_opt", percent: 23 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
end
it "accepts probability on some alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 34 },
"second_opt",
{ name: "third_opt", percent: 23 },
"fourth_opt",
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
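    # Unweighted alternatives split the remaining probability evenly:
    # 1.0 - (0.34 + 0.23) = 0.43, so "second_opt" and "fourth_opt" get 0.215 each.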
it "allows name param without probability" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt" },
"second_opt",
{ name: "third_opt", percent: 64 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "fails gracefully if config is missing experiment" do
Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
end
it "fails gracefully if config is missing" do
expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "fails gracefully if config is missing alternatives" do
Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
expect { ab_test :my_experiment }.to raise_error(NoMethodError)
end
end
it "should handle multiple experiments correctly" do
experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
ab_test("link_color", "blue", "red")
ab_test("link_color2", "blue", "red")
ab_finished("link_color2")
experiment2.alternatives.each do |alt|
expect(alt.unfinished_count).to eq(0)
end
end
context "with goals" do
before do
@experiment = { "link_color" => ["purchase", "refund"] }
@alternatives = ["blue", "red"]
@experiment_name, @goals = normalize_metric(@experiment)
@goal1 = @goals[0]
@goal2 = @goals[1]
end
it "should normalize experiment" do
expect(@experiment_name).to eq("link_color")
expect(@goals).to eq(["purchase", "refund"])
end
describe "ab_test" do
it "should allow experiment goals interface as a single hash" do
ab_test(@experiment, *@alternatives)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.goals).to eq(["purchase", "refund"])
end
end
describe "ab_finished" do
before do
@alternative_name = ab_test(@experiment, *@alternatives)
end
it "should increment the counter for the specified-goal completed alternative" do
expect { ab_finished({ "link_color" => ["purchase"] }) }
.to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
.and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
end
end
end
end
<MSG> Merge pull request #83 from philnash/stop-overwriting-finished-values
Getting negative non-finished rates when using finished(:reset => false)
<DFF> @@ -109,6 +109,15 @@ describe Split::Helper do
button_size_alt = Split::Alternative.new(button_size, 'button_size')
button_size_alt.participant_count.should eql(1)
end
+
+ it "should not over-write a finished key when an experiment is on a later version" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ experiment.increment_version
+ session[:split] = { experiment.key => 'blue', experiment.finished_key => true }
+ finshed_session = session[:split].dup
+ ab_test('link_color', 'blue', 'red')
+ session[:split].should eql(finshed_session)
+ end
end
describe 'finished' do
| 9 | Merge pull request #83 from philnash/stop-overwriting-finished-values | 0 | .rb | rb | mit | splitrb/split |
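For context on the diff above: the regression it guards against came from ab_test
clearing a user's session entry even when the experiment had already been finished
with reset: false, which skewed non-finished counts negative. A minimal sketch of
the guard idea, using hypothetical names rather than the gem's actual internals:

    # Hypothetical sketch (not Split's real code): only treat a session entry as
    # safe to clean up when the user has not already finished that experiment.
    def safe_to_cleanup?(ab_user, experiment)
      ab_user[experiment.key] && !ab_user[experiment.finished_key]
    end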
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.128")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "when ignored other address" do
before do
@request = OpenStruct.new(ip: "1.1.1.1")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it "works as usual" do
alternative_name = ab_test("link_color", "red", "blue")
expect {
ab_finished("link_color")
}.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1)
end
end
end
describe "when user is previewing" do
before(:each) do
@request = OpenStruct.new(headers: { "x-purpose" => "preview" })
end
it_behaves_like "a disabled test"
end
describe "versioned experiments" do
it "should use version zero if no version is present" do
alternative_name = ab_test("link_color", "blue", "red")
expect(experiment.version).to eq(0)
expect(ab_user["link_color"]).to eq(alternative_name)
end
it "should save the version of the experiment to the session" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
end
it "should load the experiment even if the version is not 0" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
return_alternative_name = ab_test("link_color", "blue", "red")
expect(return_alternative_name).to eq(alternative_name)
end
it "should reset the session of a user on an older version of the experiment" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
expect(new_alternative.participant_count).to eq(1)
end
it "should cleanup old versions of experiments from the session" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
end
it "should only count completion of users on the current version" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
Split::Alternative.new(alternative_name, "link_color")
experiment.reset
expect(experiment.version).to eq(1)
ab_finished("link_color")
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.completed_count).to eq(0)
end
end
context "when redis is not available" do
before(:each) do
expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)
end
context "and db_failover config option is turned off" do
before(:each) do
Split.configure do |config|
config.db_failover = false
end
end
describe "ab_test" do
it "should raise an exception" do
expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "finished" do
it "should raise an exception" do
expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "disable split testing" do
before(:each) do
Split.configure do |config|
config.enabled = false
end
end
it "should not attempt to connect to redis" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should return control variable" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect { ab_finished("link_color") }.not_to raise_error
end
end
end
context "and db_failover config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover = true
end
end
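      # With db_failover on, Redis errors are rescued and the user falls back
      # to the control (first) alternative, as the examples below show.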
describe "ab_test" do
it "should not raise an exception" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_test("link_color", "blue", "red")
end
it "should always use first alternative" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/blue")
end
context "and db_failover_allow_parameter_override config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover_allow_parameter_override = true
end
end
context "and given an override parameter" do
it "should use given override instead of the first alternative" do
@params = { "ab_test" => { "link_color" => "red" } }
expect(ab_test("link_color", "blue", "red")).to eq("red")
expect(ab_test("link_color", "blue", "red", "green")).to eq("red")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/red")
end
end
end
context "and preloaded config given" do
before do
Split.configuration.experiments[:link_color] = {
alternatives: [ "blue", "red" ],
}
end
it "uses first alternative" do
expect(ab_test(:link_color)).to eq("blue")
end
end
end
describe "finished" do
it "should not raise an exception" do
expect { ab_finished("link_color") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_finished("link_color")
end
end
end
end
context "with preloaded config" do
before { Split.configuration.experiments = {} }
it "pulls options from config file" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
ab_test :my_experiment
expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
end
it "can be called multiple times" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
5.times { ab_test :my_experiment }
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(experiment.goals).to eq([ "goal1", "goal2" ])
expect(experiment.participant_count).to eq(1)
end
it "accepts multiple goals" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: [ "goal1", "goal2", "goal3" ]
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
end
it "allow specifying goals to be optional" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ]
}
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([])
end
it "accepts multiple alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "second_opt", "third_opt" ],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
end
it "accepts probability on alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 67 },
{ name: "second_opt", percent: 10 },
{ name: "third_opt", percent: 23 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
end
it "accepts probability on some alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 34 },
"second_opt",
{ name: "third_opt", percent: 23 },
"fourth_opt",
],
}
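      # The two unweighted alternatives split the remaining probability mass
      # evenly: (1.0 - 0.34 - 0.23) / 2 = 0.215.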
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "allows name param without probability" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt" },
"second_opt",
{ name: "third_opt", percent: 64 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "fails gracefully if config is missing experiment" do
Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
end
it "fails gracefully if config is missing" do
expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "fails gracefully if config is missing alternatives" do
Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
expect { ab_test :my_experiment }.to raise_error(NoMethodError)
end
end
it "should handle multiple experiments correctly" do
experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
ab_test("link_color", "blue", "red")
ab_test("link_color2", "blue", "red")
ab_finished("link_color2")
experiment2.alternatives.each do |alt|
expect(alt.unfinished_count).to eq(0)
end
end
context "with goals" do
before do
@experiment = { "link_color" => ["purchase", "refund"] }
@alternatives = ["blue", "red"]
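      # normalize_metric splits the metric hash into [experiment_name, goals],
      # as asserted in the "should normalize experiment" example below.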
@experiment_name, @goals = normalize_metric(@experiment)
@goal1 = @goals[0]
@goal2 = @goals[1]
end
it "should normalize experiment" do
expect(@experiment_name).to eq("link_color")
expect(@goals).to eq(["purchase", "refund"])
end
describe "ab_test" do
it "should allow experiment goals interface as a single hash" do
ab_test(@experiment, *@alternatives)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.goals).to eq(["purchase", "refund"])
end
end
describe "ab_finished" do
before do
@alternative_name = ab_test(@experiment, *@alternatives)
end
it "should increment the counter for the specified-goal completed alternative" do
expect { ab_finished({ "link_color" => ["purchase"] }) }
.to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
.and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
end
end
end
end
<MSG> Merge pull request #83 from philnash/stop-overwriting-finished-values
Getting negative non-finished rates when using finished(:reset => false)
<DFF> @@ -109,6 +109,15 @@ describe Split::Helper do
button_size_alt = Split::Alternative.new(button_size, 'button_size')
button_size_alt.participant_count.should eql(1)
end
+
+ it "should not over-write a finished key when an experiment is on a later version" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ experiment.increment_version
+ session[:split] = { experiment.key => 'blue', experiment.finished_key => true }
+ finished_session = session[:split].dup
+ ab_test('link_color', 'blue', 'red')
+ session[:split].should eql(finished_session)
+ end
end
describe 'finished' do
| 9 | Merge pull request #83 from philnash/stop-overwriting-finished-values | 0 | .rb | rb | mit | splitrb/split |
10071053 | <NME> split.gemspec
<BEF> # -*- encoding: utf-8 -*-
$:.push File.expand_path("../lib", __FILE__)
require "split/version"
Gem::Specification.new do |s|
s.name = "split"
s.version = Split::VERSION
s.platform = Gem::Platform::RUBY
s.authors = ["Andrew Nesbitt"]
s.licenses = ["MIT"]
s.email = ["[email protected]"]
s.homepage = "https://github.com/splitrb/split"
s.summary = "Rack based split testing framework"
s.metadata = {
"homepage_uri" => "https://github.com/splitrb/split",
"changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md",
"source_code_uri" => "https://github.com/splitrb/split",
"bug_tracker_uri" => "https://github.com/splitrb/split/issues",
"wiki_uri" => "https://github.com/splitrb/split/wiki",
"mailing_list_uri" => "https://groups.google.com/d/forum/split-ruby"
}
s.required_ruby_version = ">= 2.5.0"
s.required_rubygems_version = ">= 2.0.0"
s.files = `git ls-files`.split("\n")
s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n")
s.require_paths = ["lib"]
s.add_dependency "redis", ">= 4.2"
s.add_dependency "sinatra", ">= 1.2.6"
s.add_dependency "rubystats", ">= 0.3.0"
s.add_development_dependency "bundler", ">= 1.17"
s.add_development_dependency "simplecov", "~> 0.15"
s.add_development_dependency "rack-test", "~> 2.0"
s.add_development_dependency "rake", "~> 13"
s.add_development_dependency "rspec", "~> 3.7"
s.add_development_dependency "pry", "~> 0.10"
s.add_development_dependency "rails", ">= 5.0"
end
<MSG> Merge pull request #595 from splitrb/fix-frozen-string-literal-comment
Add frozen_string_literal to all files that were missing it
<DFF> @@ -1,4 +1,5 @@
# -*- encoding: utf-8 -*-
+# frozen_string_literal: true
$:.push File.expand_path("../lib", __FILE__)
require "split/version"
| 1 | Merge pull request #595 from splitrb/fix-frozen-string-literal-comment | 0 | .gemspec | gemspec | mit | splitrb/split |
10071056 | <NME> README.md
<BEF> # [Split](https://libraries.io/rubygems/split)
[](http://badge.fury.io/rb/split)

[](https://codeclimate.com/github/splitrb/split)
[](https://codeclimate.com/github/splitrb/split/coverage)
[](https://github.com/RichardLitt/standard-readme)
[](https://www.codetriage.com/splitrb/split)
> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split
Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.
Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.
Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.
## Install
### Requirements
Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.
If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3).
Split uses Redis as a datastore.
Split only supports Redis 4.0 or greater.
If you're on OS X, Homebrew is the simplest way to install Redis:
```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```
You now have a Redis daemon running on port `6379`.
### Setup
```bash
gem install split
```
#### Rails
Adding `gem 'split'` to your Gemfile will autoload it when Rails starts up; as long as you've configured Redis, it will 'just work'.
#### Sinatra
To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:
```ruby
require 'split'
class MySinatraApp < Sinatra::Base
enable :sessions
helpers Split::Helper
  get '/' do
    ...
  end
end
```
## Usage
To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.
`ab_test` returns one of the alternatives; if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.
It can be used to render different templates, show different text or any other case based logic.
`ab_finished` is used to mark the completion of an experiment, or conversion.
Example: View
```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
<%= image_tag(button_file, alt: "Login!") %>
<% end %>
```
Example: Controller
```ruby
def register_new_user
# See what level of free points maximizes users' decision to buy replacement points.
@starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```
Example: Conversion tracking (in a controller!)
```ruby
def buy_new_points
# some business logic
ab_finished(:new_user_free_points)
end
```
Example: Conversion tracking (in a view)
```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```
You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
## Statistical Validity
Split has two options for determining which alternative is the best.
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.
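For intuition, here is a rough sketch of a two-proportion z-score with a pooled standard error (illustrative only; this is not Split's exact implementation):
```ruby
# Illustrative two-proportion z-score for a control (a) vs. an alternative (b).
def z_score(conversions_a, participants_a, conversions_b, participants_b)
  p_a = conversions_a.to_f / participants_a
  p_b = conversions_b.to_f / participants_b
  pooled = (conversions_a + conversions_b).to_f / (participants_a + participants_b)
  se = Math.sqrt(pooled * (1 - pooled) * (1.0 / participants_a + 1.0 / participants_b))
  (p_b - p_a) / se
end
z_score(50, 1000, 70, 1000) # => ~1.88, i.e. roughly 94% two-sided confidence
```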
As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).
[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.
The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.
Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).
```ruby
Split.configure do |config|
config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```
## Extras
### Weighted alternatives
Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.
To do this you can pass a weight with each alternative in the following ways:
```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
ab_test(:homepage_design, {'Old' => 9}, 'New')
```
This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.
### Overriding alternatives
For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the url.
If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:
http://myawesomesite.com?ab_test[button_color]=red
will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.
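If you do want overrides stored and counted, enable that option (a minimal sketch):
```ruby
Split.configure do |config|
  config.store_override = true
end
```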
In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.
http://myawesomesite.com?SPLIT_DISABLE=true
It is not required to send `SPLIT_DISABLE=false` to activate Split.
### Rspec Helper
To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:
```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
# Force a specific experiment alternative to always be returned:
# use_ab_test(signup_form: "single_page")
#
# Force alternatives for multiple experiments:
# use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
#
def use_ab_test(alternatives_by_experiment)
allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
block.call(variant) unless block.nil?
variant
end
end
end
# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
config.include SplitHelper
end
```
Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:
```ruby
it "registers using experimental signup" do
use_ab_test experiment_name: "alternative_name"
post "/signups"
...
end
```
### Starting experiments manually
By default new A/B tests will be active right after deployment. If you would like to start a new test some time after the deploy, you can do so by setting the `start_manually` configuration option to `true`.
After choosing this option tests won't be started right after deploy, but only after pressing the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, it can only be started again by pressing the `Start` button once it has been re-initialized.
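For example:
```ruby
Split.configure do |config|
  config.start_manually = true
end
```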
### Reset after completion
When a user completes a test their session is reset so that they may start the test again in the future.
To stop this behaviour you can pass the following option to the `ab_finished` method:
```ruby
ab_finished(:experiment_name, reset: false)
```
The user will then always see the alternative they started with.
Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.
### Reset experiments manually
By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.
You may want to do this when you want to change something, like the variants' names or the experiment metadata, without resetting everything.
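For example:
```ruby
Split.configure do |config|
  config.reset_manually = true
end
```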
### Multiple experiments at once
By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.
To stop this behaviour and allow users to participate in multiple experiments at once, set the `allow_multiple_experiments` config option to true like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = true
end
```
This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.
To address this, set the `allow_multiple_experiments` config option to `'control'` like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = 'control'
end
```
For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment.
### Experiment Persistence
Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.
By default Split will store the tests for each user in the session.
You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.
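The default is equivalent to the following (the same adapter appears in the Configuration section below):
```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::SessionAdapter
end
```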
#### Cookies
```ruby
Split.configure do |config|
config.persistence = :cookie
end
```
When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).
```ruby
Split.configure do |config|
config.persistence = :cookie
config.persistence_cookie_length = 2592000 # 30 days
end
```
The data stored consists of the experiment name and the variants the user is in. Example: `{ "experiment_name" => "variant_a" }`
__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API
#### Redis
Using Redis will allow ab_users to persist across sessions or machines.
```ruby
Split.configure do |config|
config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
# Equivalent
# config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```
Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)
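A sketch combining these options (the values shown are illustrative):
```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(
    lookup_by: :current_user_id,
    namespace: "persistence", # the default
    expire_seconds: 2592000) # 30 days
end
```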
#### Dual Adapter
The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.
```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
lookup_by: -> (context) { context.send(:current_user).try(:id) },
expire_seconds: 2592000)
Split.configure do |config|
config.persistence = Split::Persistence::DualAdapter.with_config(
logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
logged_in_adapter: redis_adapter,
logged_out_adapter: cookie_adapter)
config.persistence_cookie_length = 2592000 # 30 days
end
```
#### Custom Adapter
Your custom adapter needs to implement the same API as existing adapters.
See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.
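A minimal skeleton, assuming the Hash-like interface used by the built-in adapters (verify the exact method set against them):
```ruby
class YourCustomAdapterClass
  def initialize(context)
    @context = context
    @store = {} # replace with your own backing store
  end
  def [](key)
    @store[key]
  end
  def []=(key, value)
    @store[key] = value
  end
  def delete(key)
    @store.delete(key)
  end
  def keys
    @store.keys
  end
end
```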
```ruby
Split.configure do |config|
config.persistence = YourCustomAdapterClass
end
```
### Trial Event Hooks
You can define methods that will be called at the same time as experiment
alternative participation and goal completion.
For example:
``` ruby
Split.configure do |config|
config.on_trial = :log_trial # run on every trial
config.on_trial_choose = :log_trial_choose # run on trials with new users only
config.on_trial_complete = :log_trial_complete
end
```
Set these attributes to a method name available in the same context as the
`ab_test` method. These methods should accept one argument, a `Trial` instance.
``` ruby
def log_trial(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_choose(trial)
logger.info "[new user] experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_complete(trial)
logger.info "experiment=%s alternative=%s user=%s complete=true" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
#### Views
If you are running `ab_test` from a view, you must define your event
hook callback as a
[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)
in the controller:
``` ruby
helper_method :log_trial_choose
def log_trial_choose(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
### Experiment Hooks
You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.
For example:
``` ruby
Split.configure do |config|
# after experiment reset or deleted
  config.on_experiment_reset = -> (experiment) { } # Do something on reset
  config.on_experiment_delete = -> (experiment) { } # Do something else on delete
  # before experiment reset or deleted
  config.on_before_experiment_reset = -> (experiment) { } # Do something before reset
  config.on_before_experiment_delete = -> (experiment) { } # Do something before delete
  # after experiment winner had been set
  config.on_experiment_winner_choose = -> (experiment) { } # Do something when the winner is chosen
end
```
## Web Interface
Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.
If you are running Rails 2, you can mount this inside your app using `Rack::URLMap` in your `config.ru`:
```ruby
require 'split/dashboard'
run Rack::URLMap.new \
"/" => Your::App.new,
"/split" => Split::Dashboard.new
```
However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:
```ruby
gem 'split', require: 'split/dashboard'
```
Then adding this to config/routes.rb
```ruby
mount Split::Dashboard, at: 'split'
```
You may want to password protect that page; you can do so with `Rack::Auth::Basic` (in your Split initializer file):
```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:
```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
request.env['warden'].authenticated? # are we authenticated?
request.env['warden'].authenticate! # authenticate if not already
# or even check any other condition such as request.env['warden'].user.is_admin?
end
```
More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)
### Screenshot

## Configuration
You can override the default configuration options of Split like so:
```ruby
Split.configure do |config|
config.db_failover = true # handle Redis errors gracefully
config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
config.allow_multiple_experiments = true
config.enabled = true
config.persistence = Split::Persistence::SessionAdapter
#config.start_manually = false ## new test will have to be started manually from the admin panel. default false
#config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
config.include_rails_helper = true
config.redis = "redis://custom.redis.url:6380"
end
```
Split looks for the Redis host in the environment variable `REDIS_URL`, then defaults to `redis://localhost:6379` if it is not specified in the configure block.
On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to
determine which env variable key to use when retrieving the host config. This
defaults to `REDIS_URL`.
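In other words, the lookup behaves roughly like this sketch (`REDISTOGO_URL` is a hypothetical provider variable):
```ruby
provider_key = ENV["REDIS_PROVIDER"] || "REDIS_URL" # e.g. "REDISTOGO_URL" on Heroku
Split.redis = ENV[provider_key] || "redis://localhost:6379"
```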
### Filtering
In most scenarios you don't want to have A/B testing enabled for web spiders, robots or special groups of users.
Split provides functionality to filter these out, based on a predefined, extensible list of bots, IP lists or custom exclude logic.
```ruby
Split.configure do |config|
# bot config
config.robot_regex = /my_custom_robot_regex/ # or
config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"
# IP config
config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/
# or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
### Experiment configuration
Instead of providing the experiment options inline, you can store them
in a hash. This hash can control your experiment's alternatives, weights,
algorithm and whether the experiment resets once finished:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
resettable: false
},
:my_second_experiment => {
algorithm: 'Split::Algorithms::Whiplash',
alternatives: [
{ name: "a", percent: 67 },
{ name: "b", percent: 33 }
]
}
}
end
```
You can also store your experiments in a YAML file:
```ruby
Split.configure do |config|
config.experiments = YAML.load_file "config/experiments.yml"
end
```
You can then define the YAML file like:
```yaml
my_first_experiment:
alternatives:
- a
- b
my_second_experiment:
alternatives:
- name: a
percent: 67
- name: b
percent: 33
resettable: false
```
This simplifies the calls from your code:
```ruby
ab_test(:my_first_experiment)
```
and:
```ruby
ab_finished(:my_first_experiment)
```
You can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metadata: {
"a" => {"text" => "Have a fantastic day"},
"b" => {"text" => "Don't get hit by a bus"}
}
}
}
end
```
```yaml
my_first_experiment:
alternatives:
- a
- b
metadata:
a:
text: "Have a fantastic day"
b:
text: "Don't get hit by a bus"
```
This allows for some advanced experiment configuration using methods like:
```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```
or in views:
```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
<%= alternative %>
<small><%= meta['text'] %></small>
<% end %>
```
The keys used in metadata should be Strings.
#### Metrics
You might wish to track generic metrics, such as conversions, and use
those to complete multiple different experiments without adding more to
your code. You can use the configuration hash to do this, thanks to
the `:metric` option.
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metric: :my_metric
}
  }
end
```
Your code may then track a completion using the metric instead of the experiment name:
```ruby
ab_finished(:my_metric)
```
You can also create a new metric by instantiating and saving a new Metric object.
```ruby
metric = Split::Metric.new(name: :my_metric)
metric.save
```
#### Goals
You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:
```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```
or you can define them in a configuration file:
```ruby
Split.configure do |config|
config.experiments = {
link_color: {
alternatives: ["red", "blue"],
goals: ["purchase", "refund"]
}
}
end
```
To complete a goal conversion, do it like this:
```ruby
ab_finished(link_color: "purchase")
```
Note that if you pass additional options, that should be a separate hash:
```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```
**NOTE:** This does not mean that a single experiment can complete more than one goal.
Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)
**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").
**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.
**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.
#### Combined Experiments
If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.
Configure like so:
```ruby
Split.configuration.experiments = {
:button_color_experiment => {
:alternatives => ["blue", "green"],
:combined_experiments => ["button_color_on_signup", "button_color_on_login"]
}
}
```
Starting the combined test starts all combined experiments:
```ruby
ab_combined_test(:button_color_experiment)
```
Finish each combined test as normal:
```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```
**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`
* In Sinatra include the CombinedExperimentsHelper
```ruby
helpers Split::CombinedExperimentsHelper
```
### DB failover solution
Because Redis has no automatic failover mechanism, you can switch on the `db_failover` config option so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case.
It's also possible to set a `db_failover_on_db_error` callback (a proc), for example to log these errors via `Rails.logger`.
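For example (mirroring the options shown in the Configuration section above):
```ruby
Split.configure do |config|
  config.db_failover = true # handle Redis errors gracefully
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
end
```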
### Redis
You may want to change the Redis host and port Split connects to, or
set various other options at startup.
Split has a `redis` setter which can be given a string or a Redis
object. This means if you're already using Redis in your app, Split
can re-use the existing connection.
String: `Split.redis = 'redis://localhost:6379'`
Redis: `Split.redis = $redis`
For our Rails app we have a `config/initializers/split.rb` file where
we load `config/split.yml` by hand and set the Redis information
appropriately.
Here's our `config/split.yml`:
```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```
And our initializer:
```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```
### Redis Caching (v4.0+)
In some high-volume usage scenarios, Redis load can be incurred by repeated
fetches for fairly static data. Enabling caching will reduce this load.
```ruby
Split.configuration.cache = true
```
This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`
## Namespaces
If you're running multiple, separate instances of Split you may want
to namespace the keyspaces so they do not overlap. This is not unlike
the approach taken by many memcached clients.
This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)
library. To configure Split to use `Redis::Namespace`, do the following:
1. Add `redis-namespace` to your Gemfile:
```ruby
gem 'redis-namespace'
```
2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an
initializer):
```ruby
redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
```
## Outside of a Web Session
Split provides the Helper module to facilitate running experiments inside web sessions.
Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to
conduct experiments that are not tied to a web session.
```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(:experiment => experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
trial.complete!
end
```
## Algorithms
By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test.
It is possible to specify static weights to favor certain alternatives.
`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).
This algorithm will automatically weight the alternatives based on their relative performance,
choosing the better-performing ones more often as trials are completed.
`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal
participation across all alternatives. This algorithm will choose the alternative
with the fewest participants. In the event of multiple minimum participant alternatives
(i.e. starting a new "Block") the algorithm will choose a random alternative from
those minimum participant alternatives.
Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.
To change the algorithm globally for all experiments, use the following in your initializer:
```ruby
Split.configure do |config|
config.algorithm = Split::Algorithms::Whiplash
end
```
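To set the algorithm for a single experiment instead, specify it in the experiments hash (mirroring the experiment configuration shown earlier):
```ruby
Split.configure do |config|
  config.experiments = {
    my_experiment: {
      alternatives: ["a", "b"],
      algorithm: 'Split::Algorithms::Whiplash'
    }
  }
end
```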
## Extensions
- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.
## Screencast
Ryan Bates has produced an excellent 10-minute screencast about Split on the RailsCasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)
## Blogposts
* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)
## Backers
Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]
<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a>
## Sponsors
Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]
<a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a>
## Contribute
Please do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.
### Development
The source code is hosted at [GitHub](https://github.com/splitrb/split).
Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).
You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).
### Tests
Run the tests like this:
```bash
# Start a Redis server in another tab.
redis-server
bundle
rake spec
```
### A Note on Patches and Pull Requests
* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a
future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history.
(If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.
### Code of Conduct
Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
## Copyright
[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
<MSG> Merge pull request #368 from hkliya/master
Fix typo in README
<DFF> @@ -628,7 +628,7 @@ trial.choose!
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
-if goal_acheived?
+if goal_achieved?
trial.complete!
end
| 1 | Merge pull request #368 from hkliya/master | 1 | .md | md | mit | splitrb/split |
10071057 | <NME> README.md
<BEF> # [Split](https://libraries.io/rubygems/split)
[](http://badge.fury.io/rb/split)

[](https://codeclimate.com/github/splitrb/split)
[](https://codeclimate.com/github/splitrb/split/coverage)
[](https://github.com/RichardLitt/standard-readme)
[](https://www.codetriage.com/splitrb/split)
> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split
Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.
Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.
Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.
## Install
### Requirements
Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.
If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)
Split uses Redis as a datastore.
Split only supports Redis 4.0 or greater.
If you're on OS X, Homebrew is the simplest way to install Redis:
```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```
You now have a Redis daemon running on port `6379`.
### Setup
```bash
gem install split
```
#### Rails
Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.
#### Sinatra
To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:
```ruby
require 'split'
class MySinatraApp < Sinatra::Base
enable :sessions
helpers Split::Helper
get '/' do
...
end
```
## Usage
To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.
`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.
It can be used to render different templates, show different text or any other case based logic.
`ab_finished` is used to make a completion of an experiment, or conversion.
Example: View
```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
<%= image_tag(button_file, alt: "Login!") %>
<% end %>
```
Example: Controller
```ruby
def register_new_user
# See what level of free points maximizes users' decision to buy replacement points.
@starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```
Example: Conversion tracking (in a controller!)
```ruby
def buy_new_points
# some business logic
ab_finished(:new_user_free_points)
end
```
Example: Conversion tracking (in a view)
```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```
You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
## Statistical Validity
Split has two options for you to use to determine which alternative is the best.
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.
As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).
[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.
The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.
Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).
```ruby
Split.configure do |config|
config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```
## Extras
### Weighted alternatives
Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.
To do this you can pass a weight with each alternative in the following ways:
```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
ab_test(:homepage_design, {'Old' => 9}, 'New')
```
This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.
### Overriding alternatives
For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the url.
If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:
http://myawesomesite.com?ab_test[button_color]=red
will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.
In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.
http://myawesomesite.com?SPLIT_DISABLE=true
It is not required to send `SPLIT_DISABLE=false` to activate Split.
### Rspec Helper
To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:
```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
# Force a specific experiment alternative to always be returned:
# use_ab_test(signup_form: "single_page")
#
# Force alternatives for multiple experiments:
# use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
#
def use_ab_test(alternatives_by_experiment)
allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
block.call(variant) unless block.nil?
variant
end
end
end
# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
config.include SplitHelper
end
```
Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:
```ruby
it "registers using experimental signup" do
use_ab_test experiment_name: "alternative_name"
post "/signups"
...
end
```
### Starting experiments manually
By default new A/B tests will be active right after deployment. In case you would like to start new test a while after
the deploy, you can do it by setting the `start_manually` configuration option to `true`.
After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.
### Reset after completion
When a user completes a test their session is reset so that they may start the test again in the future.
To stop this behaviour you can pass the following option to the `ab_finished` method:
```ruby
ab_finished(:experiment_name, reset: false)
```
The user will then always see the alternative they started with.
Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.
### Reset experiments manually
By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.
You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.
### Multiple experiments at once
By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.
To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = true
end
```
This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.
To address this, setting the `allow_multiple_experiments` config option to 'control' like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = 'control'
end
```
For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment.
### Experiment Persistence
Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.
By default Split will store the tests for each user in the session.
You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.
#### Cookies
```ruby
Split.configure do |config|
config.persistence = :cookie
end
```
When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).
```ruby
Split.configure do |config|
config.persistence = :cookie
config.persistence_cookie_length = 2592000 # 30 days
end
```
The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" }
__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API
#### Redis
Using Redis will allow ab_users to persist across sessions or machines.
```ruby
Split.configure do |config|
config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
# Equivalent
# config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```
Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)
#### Dual Adapter
The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.
```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
lookup_by: -> (context) { context.send(:current_user).try(:id) },
expire_seconds: 2592000)
Split.configure do |config|
config.persistence = Split::Persistence::DualAdapter.with_config(
logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
logged_in_adapter: redis_adapter,
logged_out_adapter: cookie_adapter)
config.persistence_cookie_length = 2592000 # 30 days
end
```
#### Custom Adapter
Your custom adapter needs to implement the same API as existing adapters.
See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.
```ruby
Split.configure do |config|
config.persistence = YourCustomAdapterClass
end
```
### Trial Event Hooks
You can define methods that will be called at the same time as experiment
alternative participation and goal completion.
For example:
``` ruby
Split.configure do |config|
config.on_trial = :log_trial # run on every trial
config.on_trial_choose = :log_trial_choose # run on trials with new users only
config.on_trial_complete = :log_trial_complete
end
```
Set these attributes to a method name available in the same context as the
`ab_test` method. These methods should accept one argument, a `Trial` instance.
``` ruby
def log_trial(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_choose(trial)
logger.info "[new user] experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_complete(trial)
logger.info "experiment=%s alternative=%s user=%s complete=true" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
#### Views
If you are running `ab_test` from a view, you must define your event
hook callback as a
[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)
in the controller:
``` ruby
helper_method :log_trial_choose
def log_trial_choose(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
### Experiment Hooks
You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.
For example:
``` ruby
Split.configure do |config|
  # after an experiment is reset or deleted
  config.on_experiment_reset  = -> (experiment) { }        # do something on reset
  config.on_experiment_delete = -> (experiment) { }        # do something else on delete
  # before an experiment is reset or deleted
  config.on_before_experiment_reset  = -> (experiment) { } # do something before reset
  config.on_before_experiment_delete = -> (experiment) { } # do something else before delete
  # after an experiment winner has been set
  config.on_experiment_winner_choose = -> (experiment) { } # do something when the winner is chosen
end
```
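For instance, a sketch that removes an application-side record when an experiment is deleted (`ExperimentRecord` is a hypothetical model in your app, not part of Split):

```ruby
Split.configure do |config|
  config.on_experiment_delete = -> (experiment) do
    # ExperimentRecord is illustrative; replace with whatever your app uses
    # to mirror experiment state.
    ExperimentRecord.find_by(name: experiment.name)&.destroy
  end
end
```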
## Web Interface
Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.
If you are running Rails 2, you can mount this inside your app using `Rack::URLMap` in your `config.ru`:
```ruby
require 'split/dashboard'
run Rack::URLMap.new \
"/" => Your::App.new,
"/split" => Split::Dashboard.new
```
However, if you are using Rails 3 or higher, you can mount this inside your app's routes by first adding this to the Gemfile:
```ruby
gem 'split', require: 'split/dashboard'
```
Then add this to `config/routes.rb`:
```ruby
mount Split::Dashboard, at: 'split'
```
You may want to password protect that page; you can do so with `Rack::Auth::Basic` (in your Split initializer file):
```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, at: 'split'` in `config/routes.rb` with the following:
```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
request.env['warden'].authenticated? # are we authenticated?
request.env['warden'].authenticate! # authenticate if not already
# or even check any other condition such as request.env['warden'].user.is_admin?
end
```
More information on this is available [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/).
### Screenshot

## Configuration
You can override the default configuration options of Split like so:
```ruby
Split.configure do |config|
config.db_failover = true # handle Redis errors gracefully
config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
config.allow_multiple_experiments = true
config.enabled = true
config.persistence = Split::Persistence::SessionAdapter
#config.start_manually = false ## new test will have to be started manually from the admin panel. default false
#config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
config.include_rails_helper = true
config.redis = "redis://custom.redis.url:6380"
end
```
Split looks for the Redis host in the environment variable `REDIS_URL`, then defaults to `redis://localhost:6379` if it is not specified in the configure block.
On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to
determine which env variable key to use when retrieving the host config. This
defaults to `REDIS_URL`.
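To make the indirection concrete (the `REDISCLOUD_URL` key is illustrative; add-on providers typically set such variables for you):

```ruby
# With REDIS_PROVIDER=REDISCLOUD_URL in the environment, Split resolves the
# Redis host from ENV["REDISCLOUD_URL"] rather than ENV["REDIS_URL"].
ENV["REDIS_PROVIDER"]   # => "REDISCLOUD_URL"
ENV["REDISCLOUD_URL"]   # => "redis://redis1.example.com:6379"
```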
### Filtering
In most scenarios you don't want A/B testing enabled for web spiders, robots, or special groups of users.
Split provides functionality to filter these out based on a predefined, extensible list of bots, a list of IP addresses, or custom exclusion logic.
```ruby
Split.configure do |config|
# bot config
config.robot_regex = /my_custom_robot_regex/ # or
config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"
# IP config
config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/
# or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
### Experiment configuration
Instead of providing the experiment options inline, you can store them
in a hash. This hash can control your experiment's alternatives, weights,
algorithm, and whether the experiment resets once finished:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
resettable: false
},
    my_second_experiment: {
algorithm: 'Split::Algorithms::Whiplash',
alternatives: [
{ name: "a", percent: 67 },
{ name: "b", percent: 33 }
]
}
}
end
```
You can also store your experiments in a YAML file:
```ruby
Split.configure do |config|
config.experiments = YAML.load_file "config/experiments.yml"
end
```
You can then define the YAML file like:
```yaml
my_first_experiment:
alternatives:
- a
- b
my_second_experiment:
alternatives:
- name: a
percent: 67
- name: b
percent: 33
resettable: false
```
This simplifies the calls from your code:
```ruby
ab_test(:my_first_experiment)
```
and:
```ruby
ab_finished(:my_first_experiment)
```
You can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metadata: {
"a" => {"text" => "Have a fantastic day"},
"b" => {"text" => "Don't get hit by a bus"}
}
}
}
end
```
```yaml
my_first_experiment:
alternatives:
- a
- b
metadata:
a:
text: "Have a fantastic day"
b:
text: "Don't get hit by a bus"
```
This allows for some advanced experiment configuration using methods like:
```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```
or in views:
```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
<%= alternative %>
<small><%= meta['text'] %></small>
<% end %>
```
The keys used in metadata should be Strings.
#### Metrics
You might wish to track generic metrics, such as conversions, and use
those to complete multiple different experiments without adding more to
your code. You can use the configuration hash to do this, thanks to
the `:metric` option.
```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metric: :my_metric
    }
  }
end
```

Your code may then track a completion using the metric instead of the experiment name:

```ruby
ab_finished(:my_metric)
```
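For instance, a sketch in which two experiments share one metric, so a single `ab_finished` call completes both for the current user (the experiment names here are illustrative):

```ruby
Split.configure do |config|
  config.experiments = {
    signup_button_color: { alternatives: ["red", "blue"],   metric: :signup },
    signup_headline:     { alternatives: ["short", "long"], metric: :signup }
  }
end

# Later, in a controller or view:
ab_finished(:signup)
```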
You can also create a new metric by instantiating and saving a new Metric object.
```ruby
metric = Split::Metric.new(name: :my_metric)
metric.save
```
#### Goals
You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:
```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```
or you can define them in a configuration file:
```ruby
Split.configure do |config|
config.experiments = {
link_color: {
alternatives: ["red", "blue"],
goals: ["purchase", "refund"]
}
}
end
```
To complete a goal conversion, do it like this:
```ruby
ab_finished(link_color: "purchase")
```
Note that if you pass additional options, that should be a separate hash:
```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```
**NOTE:** This does not mean that a single experiment can complete more than one goal.
Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)
**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").
**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.
**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.
#### Combined Experiments
If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.
Configure like so:
```ruby
Split.configuration.experiments = {
:button_color_experiment => {
:alternatives => ["blue", "green"],
:combined_experiments => ["button_color_on_signup", "button_color_on_login"]
}
}
```
Starting the combined test starts all combined experiments:
```ruby
ab_combined_test(:button_color_experiment)
```
Finish each combined test as normal:
```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```
**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`
* In Sinatra, include the `CombinedExperimentsHelper`:
```ruby
helpers Split::CombinedExperimentsHelper
```
### DB failover solution
Because Redis has no automatic failover mechanism, you can switch on the `db_failover` config option so that `ab_test` and `ab_finished` will not crash in case of a Redis failure. `ab_test` always delivers alternative A (the first one) in that case.

It's also possible to set a `db_failover_on_db_error` callback (proc), for example to log these errors via `Rails.logger`.
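A minimal sketch, mirroring the options shown in the Configuration section above:

```ruby
Split.configure do |config|
  # Serve the first alternative and keep the request alive if Redis is down.
  config.db_failover = true
  # Log the underlying error instead of raising it.
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
end
```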
### Redis
You may want to change the Redis host and port Split connects to, or
set various other options at startup.
Split has a `redis` setter which can be given a string or a Redis
object. This means if you're already using Redis in your app, Split
can re-use the existing connection.
String: `Split.redis = 'redis://localhost:6379'`
Redis: `Split.redis = $redis`
For our Rails app we have a `config/initializers/split.rb` file where
we load `config/split.yml` by hand and set the Redis information
appropriately.
Here's our `config/split.yml`:
```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```
And our initializer:
```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```
### Redis Caching (v4.0+)
In some high-volume usage scenarios, Redis load can be incurred by repeated
fetches for fairly static data. Enabling caching will reduce this load.
```ruby
Split.configuration.cache = true
```
This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`
## Namespaces
If you're running multiple, separate instances of Split you may want
to namespace the keyspaces so they do not overlap. This is not unlike
the approach taken by many memcached clients.
This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)
library. To configure Split to use `Redis::Namespace`, do the following:
1. Add `redis-namespace` to your Gemfile:
```ruby
gem 'redis-namespace'
```
2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an
initializer):
```ruby
redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
```
## Outside of a Web Session
Split provides the Helper module to facilitate running experiments inside web sessions.
Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to
conduct experiments that are not tied to a web session.
```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(experiment: experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
trial.complete!
end
```
## Algorithms
By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test.
It is possible to specify static weights to favor certain alternatives.
`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).
This algorithm will automatically weight the alternatives based on their relative performance,
choosing the better-performing ones more often as trials are completed.
`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal
participation across all alternatives. This algorithm will choose the alternative
with the fewest participants. In the event of multiple minimum participant alternatives
(i.e. starting a new "Block") the algorithm will choose a random alternative from
those minimum participant alternatives.
Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.
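As a rough sketch of what a custom algorithm can look like (this assumes the same interface as the bundled algorithms: a `choose_alternative` class method that receives the experiment and returns one of its alternatives):

```ruby
module Split
  module Algorithms
    # Illustrative only: always picks the alternative with the fewest
    # completions. Check the bundled algorithms in your Split version
    # before relying on this interface.
    module FewestCompletions
      def self.choose_alternative(experiment)
        experiment.alternatives.min_by(&:completed_count)
      end
    end
  end
end
```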
To change the algorithm globally for all experiments, use the following in your initializer:
```ruby
Split.configure do |config|
config.algorithm = Split::Algorithms::Whiplash
end
```
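To set an algorithm for a single experiment instead, use the `algorithm` key in the experiments hash, as shown in the Experiment configuration section above:

```ruby
Split.configure do |config|
  config.experiments = {
    my_second_experiment: {
      algorithm: 'Split::Algorithms::Whiplash',
      alternatives: ["a", "b"]
    }
  }
end
```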
## Extensions
- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.
## Screencast
Ryan Bates has produced an excellent 10 minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)
## Blogposts
* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)
## Backers
Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]
<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a>
## Sponsors
Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]
<a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a>
## Contribute
Please do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.
### Development
The source code is hosted at [GitHub](https://github.com/splitrb/split).
Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).
You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).
### Tests
Run the tests like this:
```bash
# Start a Redis server in another tab.
redis-server

bundle
rake spec
```
### A Note on Patches and Pull Requests
* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a
future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history.
(If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.
### Code of Conduct
Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
## Copyright
[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
<MSG> Merge pull request #368 from hkliya/master
Fix typo in README
<DFF> @@ -628,7 +628,7 @@ trial.choose!
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
-if goal_acheived?
+if goal_achieved?
trial.complete!
end
| 1 | Merge pull request #368 from hkliya/master | 1 | .md | md | mit | splitrb/split |
10071058 | <NME> README.md
<BEF> # [Split](https://libraries.io/rubygems/split)
[](http://badge.fury.io/rb/split)

[](https://codeclimate.com/github/splitrb/split)
[](https://codeclimate.com/github/splitrb/split/coverage)
[](https://github.com/RichardLitt/standard-readme)
[](https://www.codetriage.com/splitrb/split)
> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split
Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.
Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.
Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.
## Install
### Requirements
Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.
If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)
Split uses Redis as a datastore.
Split only supports Redis 4.0 or greater.
If you're on OS X, Homebrew is the simplest way to install Redis:
```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```
You now have a Redis daemon running on port `6379`.
### Setup
```bash
gem install split
```
#### Rails
Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.
#### Sinatra
To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:
```ruby
require 'split'
class MySinatraApp < Sinatra::Base
enable :sessions
helpers Split::Helper
get '/' do
...
end
```
## Usage
To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.
`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.
It can be used to render different templates, show different text or any other case based logic.
`ab_finished` is used to make a completion of an experiment, or conversion.
Example: View
```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
<%= image_tag(button_file, alt: "Login!") %>
<% end %>
```
Example: Controller
```ruby
def register_new_user
# See what level of free points maximizes users' decision to buy replacement points.
@starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```
Example: Conversion tracking (in a controller!)
```ruby
def buy_new_points
# some business logic
ab_finished(:new_user_free_points)
end
```
Example: Conversion tracking (in a view)
```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```
You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
## Statistical Validity
Split has two options for you to use to determine which alternative is the best.
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.
As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).
[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.
The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.
Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).
```ruby
Split.configure do |config|
config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```
## Extras
### Weighted alternatives
Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.
To do this you can pass a weight with each alternative in the following ways:
```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
ab_test(:homepage_design, {'Old' => 9}, 'New')
```
This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.
### Overriding alternatives
For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the url.
If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:
http://myawesomesite.com?ab_test[button_color]=red
will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.
In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.
http://myawesomesite.com?SPLIT_DISABLE=true
It is not required to send `SPLIT_DISABLE=false` to activate Split.
### Rspec Helper
To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:
```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
# Force a specific experiment alternative to always be returned:
# use_ab_test(signup_form: "single_page")
#
# Force alternatives for multiple experiments:
# use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
#
def use_ab_test(alternatives_by_experiment)
allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
block.call(variant) unless block.nil?
variant
end
end
end
# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
config.include SplitHelper
end
```
Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:
```ruby
it "registers using experimental signup" do
use_ab_test experiment_name: "alternative_name"
post "/signups"
...
end
```
### Starting experiments manually
By default new A/B tests will be active right after deployment. In case you would like to start new test a while after
the deploy, you can do it by setting the `start_manually` configuration option to `true`.
After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.
### Reset after completion
When a user completes a test their session is reset so that they may start the test again in the future.
To stop this behaviour you can pass the following option to the `ab_finished` method:
```ruby
ab_finished(:experiment_name, reset: false)
```
The user will then always see the alternative they started with.
Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.
### Reset experiments manually
By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.
You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.
### Multiple experiments at once
By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.
To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = true
end
```
This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.
To address this, setting the `allow_multiple_experiments` config option to 'control' like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = 'control'
end
```
For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment.
### Experiment Persistence
Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.
By default Split will store the tests for each user in the session.
You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.
#### Cookies
```ruby
Split.configure do |config|
config.persistence = :cookie
end
```
When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).
```ruby
Split.configure do |config|
config.persistence = :cookie
config.persistence_cookie_length = 2592000 # 30 days
end
```
The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" }
__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API
#### Redis
Using Redis will allow ab_users to persist across sessions or machines.
```ruby
Split.configure do |config|
config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
# Equivalent
# config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```
Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)
#### Dual Adapter
The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.
```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
lookup_by: -> (context) { context.send(:current_user).try(:id) },
expire_seconds: 2592000)
Split.configure do |config|
config.persistence = Split::Persistence::DualAdapter.with_config(
logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
logged_in_adapter: redis_adapter,
logged_out_adapter: cookie_adapter)
config.persistence_cookie_length = 2592000 # 30 days
end
```
#### Custom Adapter
Your custom adapter needs to implement the same API as existing adapters.
See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.
```ruby
Split.configure do |config|
config.persistence = YourCustomAdapterClass
end
```
### Trial Event Hooks
You can define methods that will be called at the same time as experiment
alternative participation and goal completion.
For example:
``` ruby
Split.configure do |config|
config.on_trial = :log_trial # run on every trial
config.on_trial_choose = :log_trial_choose # run on trials with new users only
config.on_trial_complete = :log_trial_complete
end
```
Set these attributes to a method name available in the same context as the
`ab_test` method. These methods should accept one argument, a `Trial` instance.
``` ruby
def log_trial(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_choose(trial)
logger.info "[new user] experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_complete(trial)
logger.info "experiment=%s alternative=%s user=%s complete=true" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
#### Views
If you are running `ab_test` from a view, you must define your event
hook callback as a
[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)
in the controller:
``` ruby
helper_method :log_trial_choose
def log_trial_choose(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
### Experiment Hooks
You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.
For example:
``` ruby
Split.configure do |config|
# after experiment reset or deleted
config.on_experiment_reset = -> (example) { # Do something on reset }
config.on_experiment_delete = -> (experiment) { # Do something else on delete }
# before experiment reset or deleted
config.on_before_experiment_reset = -> (example) { # Do something on reset }
config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }
# after experiment winner had been set
config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }
end
```
## Web Interface
Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.
If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`
```ruby
require 'split/dashboard'
run Rack::URLMap.new \
"/" => Your::App.new,
"/split" => Split::Dashboard.new
```
However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:
```ruby
gem 'split', require: 'split/dashboard'
```
Then adding this to config/routes.rb
```ruby
mount Split::Dashboard, at: 'split'
```
You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)
```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:
```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
request.env['warden'].authenticated? # are we authenticated?
request.env['warden'].authenticate! # authenticate if not already
# or even check any other condition such as request.env['warden'].user.is_admin?
end
```
More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)
### Screenshot

## Configuration
You can override the default configuration options of Split like so:
```ruby
Split.configure do |config|
config.db_failover = true # handle Redis errors gracefully
config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
config.allow_multiple_experiments = true
config.enabled = true
config.persistence = Split::Persistence::SessionAdapter
#config.start_manually = false ## new test will have to be started manually from the admin panel. default false
#config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
config.include_rails_helper = true
config.redis = "redis://custom.redis.url:6380"
end
```
Split looks for the Redis host in the environment variable `REDIS_URL` then
defaults to `redis://localhost:6379` if not specified by configure block.
On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to
determine which env variable key to use when retrieving the host config. This
defaults to `REDIS_URL`.
### Filtering
In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.
Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.
```ruby
Split.configure do |config|
# bot config
config.robot_regex = /my_custom_robot_regex/ # or
config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"
# IP config
config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/
# or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
### Experiment configuration
Instead of providing the experiment options inline, you can store them
in a hash. This hash can control your experiment's alternatives, weights,
algorithm and if the experiment resets once finished:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
resettable: false
},
:my_second_experiment => {
algorithm: 'Split::Algorithms::Whiplash',
alternatives: [
{ name: "a", percent: 67 },
{ name: "b", percent: 33 }
]
}
}
end
```
You can also store your experiments in a YAML file:
```ruby
Split.configure do |config|
config.experiments = YAML.load_file "config/experiments.yml"
end
```
You can then define the YAML file like:
```yaml
my_first_experiment:
alternatives:
- a
- b
my_second_experiment:
alternatives:
- name: a
percent: 67
- name: b
percent: 33
resettable: false
```
This simplifies the calls from your code:
```ruby
ab_test(:my_first_experiment)
```
and:
```ruby
ab_finished(:my_first_experiment)
```
You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metadata: {
"a" => {"text" => "Have a fantastic day"},
"b" => {"text" => "Don't get hit by a bus"}
}
}
}
end
```
```yaml
my_first_experiment:
alternatives:
- a
- b
metadata:
a:
text: "Have a fantastic day"
b:
text: "Don't get hit by a bus"
```
This allows for some advanced experiment configuration using methods like:
```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```
or in views:
```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
<%= alternative %>
<small><%= meta['text'] %></small>
<% end %>
```
The keys used in meta data should be Strings
#### Metrics
You might wish to track generic metrics, such as conversions, and use
those to complete multiple different experiments without adding more to
your code. You can use the configuration hash to do this, thanks to
the `:metric` option.
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metric: :my_metric
}
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
if goal_acheived?
trial.complete!
end
```ruby
ab_finished(:my_metric)
```
You can also create a new metric by instantiating and saving a new Metric object.
```ruby
Split::Metric.new(:my_metric)
Split::Metric.save
```
#### Goals
You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:
```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```
or you can define them in a configuration file:
```ruby
Split.configure do |config|
config.experiments = {
link_color: {
alternatives: ["red", "blue"],
goals: ["purchase", "refund"]
}
}
end
```
To complete a goal conversion, you do it like:
```ruby
ab_finished(link_color: "purchase")
```
Note that if you pass additional options, that should be a separate hash:
```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```
**NOTE:** This does not mean that a single experiment can complete more than one goal.
Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)
**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").
**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.
**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.
#### Combined Experiments
If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.
Configure like so:
```ruby
Split.configuration.experiments = {
:button_color_experiment => {
:alternatives => ["blue", "green"],
:combined_experiments => ["button_color_on_signup", "button_color_on_login"]
}
}
```
Starting the combined test starts all combined experiments
```ruby
ab_combined_test(:button_color_experiment)
```
Finish each combined test as normal
```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```
**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`
* In Sinatra include the CombinedExperimentsHelper
```
helpers Split::CombinedExperimentsHelper
```
### DB failover solution
Due to the fact that Redis has no automatic failover mechanism, it's
possible to switch on the `db_failover` config option, so that `ab_test`
and `ab_finished` will not crash in case of a db failure. `ab_test` always
delivers alternative A (the first one) in that case.
It's also possible to set a `db_failover_on_db_error` callback (proc)
for example to log these errors via Rails.logger.
### Redis
You may want to change the Redis host and port Split connects to, or
set various other options at startup.
Split has a `redis` setter which can be given a string or a Redis
object. This means if you're already using Redis in your app, Split
can re-use the existing connection.
String: `Split.redis = 'redis://localhost:6379'`
Redis: `Split.redis = $redis`
For our rails app we have a `config/initializers/split.rb` file where
we load `config/split.yml` by hand and set the Redis information
appropriately.
Here's our `config/split.yml`:
```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```
And our initializer:
```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```
### Redis Caching (v4.0+)
In some high-volume usage scenarios, Redis load can be incurred by repeated
fetches for fairly static data. Enabling caching will reduce this load.
```ruby
Split.configuration.cache = true
````
This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`
## Namespaces
If you're running multiple, separate instances of Split you may want
to namespace the keyspaces so they do not overlap. This is not unlike
the approach taken by many memcached clients.
This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)
library. To configure Split to use `Redis::Namespace`, do the following:
1. Add `redis-namespace` to your Gemfile:
```ruby
gem 'redis-namespace'
```
2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an
initializer):
```ruby
redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
```
## Outside of a Web Session
Split provides the Helper module to facilitate running experiments inside web sessions.
Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to
conduct experiments that are not tied to a web session.
```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(:experiment => experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
trial.complete!
end
```
## Algorithms
By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.
It is possible to specify static weights to favor certain alternatives.
`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).
This algorithm will automatically weight the alternatives based on their relative performance,
choosing the better-performing ones more often as trials are completed.
`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal
participation across all alternatives. This algorithm will choose the alternative
with the fewest participants. In the event of multiple minimum participant alternatives
(i.e. starting a new "Block") the algorithm will choose a random alternative from
those minimum participant alternatives.
Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.
To change the algorithm globally for all experiments, use the following in your initializer:
```ruby
Split.configure do |config|
config.algorithm = Split::Algorithms::Whiplash
end
```
## Extensions
- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.
## Screencast
Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)
## Blogposts
* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)
## Backers
Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]
<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a>
## Sponsors
Become a sponsor and get your logo on our README on GitHub with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]
<a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a>
## Contribute
Please do! Over 70 people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.
### Development
The source code is hosted at [GitHub](https://github.com/splitrb/split).
Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).
You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).
### Tests
Run the tests like this:

    # Start a Redis server in another tab.
    redis-server

    bundle
    rake spec
### A Note on Patches and Pull Requests
* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a
future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history.
(If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.
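
Putting those steps together, a typical workflow looks something like this (the fork URL and branch name are illustrative):

    # Fork splitrb/split on GitHub first, then:
    git clone git@github.com:your-username/split.git
    cd split
    git checkout -b my-topic-branch

    # Make your change, add specs, and make sure they pass.
    bundle exec rake spec

    git commit -am "Describe your change"
    git push origin my-topic-branch
    # Finally, open a pull request against splitrb/split.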
### Code of Conduct
Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
## Copyright
[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
<MSG> Merge pull request #368 from hkliya/master
Fix typo in README
<DFF> @@ -628,7 +628,7 @@ trial.choose!
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
-if goal_acheived?
+if goal_achieved?
trial.complete!
end
| 1 | Merge pull request #368 from hkliya/master | 1 | .md | md | mit | splitrb/split |
10071059 | <NME> jquery.meow.js
<BEF> (function ($, window) {
'use strict';
// Meow queue
var default_meow_area,
meows = {
queue: {},
add: function (meow) {
this.queue[meow.timestamp] = meow;
},
get: function (timestamp) {
return this.queue[timestamp];
},
remove: function (timestamp) {
delete this.queue[timestamp];
},
size: function () {
var timestamp,
size = 0;
for (timestamp in this.queue) {
if (this.queue.hasOwnProperty(timestamp)) { size += 1; }
}
return size;
}
},
// Meow constructor
Meow = function (options) {
var that = this;
this.timestamp = new Date().getTime(); // used to identify this meow and timeout
this.hovered = false; // whether mouse is over or not
if (typeof default_meow_area === 'undefined'
&& typeof options.container === 'undefined') {
default_meow_area = $(window.document.createElement('div'))
.attr({'id': ((new Date()).getTime()), 'class': 'meows'});
$('body').prepend(default_meow_area);
}
if (meows.size() <= 0) {
if (typeof options.beforeCreateFirst === 'function') {
options.beforeCreateFirst.call(that);
}
}
if (typeof options.container === 'string') {
this.container = $(options.container);
} else {
this.container = default_meow_area;
}
if (typeof options.title === 'string') {
this.title = options.title;
}
if (typeof options.message === 'string') {
this.message = options.message;
} else if (options.message instanceof $) {
if (options.message.is('input,textarea,select')) {
this.message = options.message.val();
} else {
this.message = options.message.text();
}
if (typeof this.title === 'undefined' && typeof options.message.attr('title') === 'string') {
this.title = options.message.attr('title');
}
}
if (typeof options.icon === 'string') {
this.icon = options.icon;
}
if (options.sticky) {
this.duration = Infinity;
} else {
this.duration = options.duration || 5000;
if (typeof options === 'string') {
event = options;
} else if (typeof options == 'object') {
// is the message an object we need to parse or just a string?
if (typeof options.trigger === 'string') {
trigger = options.trigger;
}
if (typeof options.message === 'string') {
message = options.message;
} else if (typeof options.message === 'object') {
var type = options.message[0].nodeName;
if ($.inArray(type, ['INPUT', 'SELECT', 'TEXTAREA']) !== -1) {
message = options.message.attr('value');
} else {
message = options.message.text();
}
}
if (typeof options.icon === 'string') {
icon = options.icon;
}
}
if (trigger && message) {
$(this).bind(trigger, function () {
methods.createMessage(message, icon);
});
}
      // Add icon if it's defined
      if (typeof this.icon === 'string') {
        this.manifest.find('.inner').prepend(
          $(window.document.createElement('div')).addClass('icon').html(
            $(window.document.createElement('img')).attr('src', this.icon)
          )
        );
      }
// Add close button if the meow isn't uncloseable
// TODO: this close button needs to be much prettier
if (options.closeable !== false) {
this.manifest.find('.inner').prepend(
$(window.document.createElement('a'))
.addClass('close')
.html('×')
.attr('href', '#close-meow-' + that.timestamp)
.click(function (e) {
e.preventDefault();
that.destroy();
})
);
}
this.manifest.bind('mouseenter mouseleave', function (event) {
if (event.type === 'mouseleave') {
that.hovered = false;
that.manifest.removeClass('hover');
          // Destroy the meow on mouseleave if it's timed out
if (that.timestamp + that.duration <= new Date().getTime()) {
that.destroy();
}
} else {
that.hovered = true;
that.manifest.addClass('hover');
}
});
// Add a timeout if the duration isn't Infinity
if (this.duration !== Infinity) {
this.timeout = window.setTimeout(function () {
// Make sure this meow hasn't already been destroyed
if (typeof meows.get(that.timestamp) !== 'undefined') {
// Call callback if it's defined (this = meow DOM element)
if (typeof options.onTimeout === 'function') {
options.onTimeout.call(that.manifest);
}
// Don't destroy if user is hovering over meow
if (that.hovered !== true && typeof that === 'object') {
that.destroy();
}
}
}, that.duration);
}
this.destroy = function () {
if (that.destroyed !== true) {
// Call callback if it's defined (this = meow DOM element)
if (typeof options.beforeDestroy === 'function') {
options.beforeDestroy.call(that.manifest);
}
that.manifest.find('.inner').fadeTo(400, 0, function () {
that.manifest.slideUp(function () {
that.manifest.remove();
that.destroyed = true;
meows.remove(that.timestamp);
if (typeof options.afterDestroy === 'function') {
options.afterDestroy.call(null);
}
if (meows.size() <= 0) {
if (default_meow_area instanceof $) {
default_meow_area.remove();
default_meow_area = undefined;
}
if (typeof options.afterDestroyLast === 'function') {
options.afterDestroyLast.call(null);
}
}
});
});
}
};
};
$.fn.meow = function (args) {
var meow = new Meow(args);
meows.add(meow);
return meow;
};
$.meow = $.fn.meow;
}(jQuery, window));
<MSG> fixed non-updating messages
<DFF> @@ -77,26 +77,28 @@
if (typeof options === 'string') {
event = options;
} else if (typeof options == 'object') {
- // is the message an object we need to parse or just a string?
+ // set the event
if (typeof options.trigger === 'string') {
trigger = options.trigger;
}
- if (typeof options.message === 'string') {
- message = options.message;
- } else if (typeof options.message === 'object') {
- var type = options.message[0].nodeName;
- if ($.inArray(type, ['INPUT', 'SELECT', 'TEXTAREA']) !== -1) {
- message = options.message.attr('value');
- } else {
- message = options.message.text();
- }
- }
- if (typeof options.icon === 'string') {
- icon = options.icon;
- }
+ // is the message an object we need to parse or just a string?
+
}
- if (trigger && message) {
+ if (typeof trigger === 'string') {
$(this).bind(trigger, function () {
+ if (typeof options.message === 'string') {
+ message = options.message;
+ } else if (typeof options.message === 'object') {
+ var type = options.message[0].nodeName;
+ if ($.inArray(type, ['INPUT', 'SELECT', 'TEXTAREA']) !== -1) {
+ message = options.message.attr('value');
+ } else {
+ message = options.message.text();
+ }
+ }
+ if (typeof options.icon === 'string') {
+ icon = options.icon;
+ }
methods.createMessage(message, icon);
});
}
| 17 | fixed non-updating messages | 15 | .js | meow | mit | zacstewart/Meow |
10071061 | <NME> urls.py
<BEF> # -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url, include
urlpatterns = patterns("djangopypi.views",
# Simple PyPI
url(r'^simple/$', "simple",
name="djangopypi-simple"),
url(r'^simple/(?P<dist_name>[\w\d_\.\-]+)/(?P<version>[\w\.\d\-_]+)/$',
"show_version",
name="djangopypi-show_version"),
url(r'^simple/(?P<dist_name>[\w\d_\.\-]+)/$', "show_links",
name="djangopypi-show_links"),
url(r'^$', "simple", {'template_name': 'djangopypi/pypi.html'},
name="djangopypi-pypi"),
url(r'^(?P<dist_name>[\w\d_\.\-]+)/$', "show_links",
{'template_name': 'djangopypi/pypi_show_links.html'},
name="djangopypi-pypi_show_links"),
)
<MSG> Merge branch 'viniciuschagas/master'
<DFF> @@ -19,5 +19,6 @@ urlpatterns = patterns("djangopypi.views",
url(r'^(?P<dist_name>[\w\d_\.\-]+)/$', "show_links",
{'template_name': 'djangopypi/pypi_show_links.html'},
name="djangopypi-pypi_show_links"),
-)
-
+
+ url(r'^search','search',name='djangopypi-search')
+)
\ No newline at end of file
| 3 | Merge branch 'viniciuschagas/master' | 2 | .py | py | bsd-3-clause | ask/chishop |
10071062 | <NME> helper_spec.rb
<BEF> # frozen_string_literal: true
require "spec_helper"
# TODO change some of these tests to use Rack::Test
describe Split::Helper do
include Split::Helper
let(:experiment) {
Split::ExperimentCatalog.find_or_create("link_color", "blue", "red")
}
describe "ab_test" do
it "should not raise an error when passed strings for alternatives" do
expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error
end
it "should not raise an error when passed an array for alternatives" do
expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error
end
it "should raise the appropriate error when passed integers for alternatives" do
expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError)
end
it "should raise the appropriate error when passed symbols for alternatives" do
expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError)
end
it "should not raise error when passed an array for goals" do
expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error
end
it "should not raise error when passed just one goal" do
expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error
end
it "raises an appropriate error when processing combined expirements" do
Split.configuration.experiments = {
combined_exp_1: {
alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ],
metric: :my_metric,
combined_experiments: [:combined_exp_1_sub_1]
}
}
Split::ExperimentCatalog.find_or_create("combined_exp_1")
expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do
ab_test("link_color", "blue", "red")
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should increment the participation counter after assignment to a new user" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)
end
it "should not increment the counter for an experiment that the user is not participating in" do
ab_test("link_color", "blue", "red")
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
# User shouldn't participate in this second experiment
ab_test("button_size", "small", "big")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an not started experiment" do
expect(Split.configuration).to receive(:start_manually).and_return(true)
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should return the given alternative for an existing user" do
expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red")
end
it "should always return the winner if one is present" do
experiment.winner = "orange"
expect(ab_test("link_color", "blue", "red")).to eq("orange")
end
it "should allow the alternative to be forced by passing it in the params" do
# ?ab_test[link_color]=blue
@params = { "ab_test" => { "link_color" => "blue" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
@params = { "ab_test" => { "link_color" => "red" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1)
expect(alternative).to eq("red")
end
it "should not allow an arbitrary alternative" do
@params = { "ab_test" => { "link_color" => "pink" } }
alternative = ab_test("link_color", "blue")
expect(alternative).to eq("blue")
end
it "should not store the split when a param forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do
@params = { "SPLIT_DISABLE" => "true" }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
alternative = ab_test("link_color", "red", "blue")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1)
expect(alternative).to eq("red")
end
it "should not store the split when Split generically disabled" do
@params = { "SPLIT_DISABLE" => "true" }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
context "when store_override is set" do
before { Split.configuration.store_override = true }
it "should store the forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).to receive(:[]=).with("link_color", "blue")
ab_test("link_color", "blue", "red")
end
end
context "when on_trial_choose is set" do
before { Split.configuration.on_trial_choose = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_test("link_color", "blue", "red")
end
end
it "should allow passing a block" do
alt = ab_test("link_color", "blue", "red")
ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" }
expect(ret).to eq("shared/#{alt}")
end
it "should allow the share of visitors see an alternative to be specified" do
ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should allow alternative weighting interface as a single hash" do
ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"])
expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])
end
it "should only let a user participate in one experiment at a time" do
link_color = ab_test("link_color", "blue", "red")
ab_test("button_size", "small", "big")
expect(ab_user["link_color"]).to eq(link_color)
big = Split::Alternative.new("big", "button_size")
expect(big.participant_count).to eq(0)
small = Split::Alternative.new("small", "button_size")
expect(small.participant_count).to eq(0)
end
it "should let a user participate in many experiment with allow_multiple_experiments option" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
link_color = ab_test("link_color", "blue", "red")
button_size = ab_test("button_size", "small", "big")
expect(ab_user["link_color"]).to eq(link_color)
expect(ab_user["button_size"]).to eq(button_size)
button_size_alt = Split::Alternative.new(button_size, "button_size")
expect(button_size_alt.participant_count).to eq(1)
end
context "with allow_multiple_experiments = 'control'" do
it "should let a user participate in many experiment with one non-'control' alternative" do
Split.configure do |config|
config.allow_multiple_experiments = "control"
end
groups = 100.times.map do |n|
ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n })
end
experiments = ab_user.active_experiments
expect(experiments.size).to be > 1
count_control = experiments.values.count { |g| g == "control" }
expect(count_control).to eq(experiments.size - 1)
count_alts = groups.count { |g| g != "control" }
expect(count_alts).to eq(1)
end
context "when user already has experiment" do
let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) }
before do
Split.configure do |config|
config.allow_multiple_experiments = "control"
end
Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save
Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save
end
it "should restore previously selected alternative" do
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "should select the correct alternatives after experiment resets" do
experiment = Split::ExperimentCatalog.find(:test_0)
experiment.reset
mock_user[experiment.key] = "test-alt"
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "lets override existing choice" do
pending "this requires user store reset on first call not depending on whelther it is current trial"
@params = { "ab_test" => { "test_1" => "test-alt" } }
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control"
expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
end
end
end
it "should not over-write a finished key when an experiment is on a later version" do
experiment.increment_version
ab_user = { experiment.key => "blue", experiment.finished_key => true }
finished_session = ab_user.dup
ab_test("link_color", "blue", "red")
expect(ab_user).to eq(finished_session)
end
end
describe "metadata" do
context "is defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: { "one" => "Meta1", "two" => "Meta2" }
}
}
end
it "should be passed to helper block" do
@params = { "ab_test" => { "my_experiment" => "two" } }
expect(ab_test("my_experiment")).to eq "two"
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq("Meta2")
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment")).to eq "one"
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq("Meta1")
end
end
context "is not defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: nil
}
}
end
it "should be passed to helper block" do
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq({})
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq({})
end
end
end
describe "ab_finished" do
context "for an experiment that the user participates in" do
before(:each) do
@experiment_name = "link_color"
@alternatives = ["blue", "red"]
@experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
@alternative_name = ab_test(@experiment_name, *@alternatives)
@previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
end
it "should increment the counter for the completed alternative" do
ab_finished(@experiment_name)
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should set experiment's finished key if reset is false" do
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should not increment the counter if reset is false and the experiment has been already finished" do
2.times { ab_finished(@experiment_name, { reset: false }) }
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new(a, "button_size").completed_count }
end
it "should clear out the user's participation from their session" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should not clear out the users session if reset is false" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should reset the users session when experiment is not versioned" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should reset the users session when experiment is versioned" do
@experiment.increment_version
@alternative_name = ab_test(@experiment_name, *@alternatives)
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
context "when on_trial_complete is set" do
before { Split.configuration.on_trial_complete = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_finished(@experiment_name)
"shared/#{alternative}"
end.should eq('shared/blue')
end
end
describe 'finished' do
end
context "for an experiment that the user does not participate in" do
before do
Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt")
end
it "should not raise an exception" do
expect { ab_finished(:not_started_experiment) }.not_to raise_exception
end
it "should not change the user state when reset is false" do
expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])
end
it "should not change the user state when reset is true" do
expect(self).not_to receive(:reset!)
ab_finished(:not_started_experiment)
end
it "should not increment the completed counter" do
ab_finished(:not_started_experiment)
expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0)
expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0)
end
end
end
context "finished with config" do
it "passes reset option" do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
}
}
alternative = ab_test(:my_experiment)
experiment = Split::ExperimentCatalog.find :my_experiment
ab_finished :my_experiment
expect(ab_user[experiment.key]).to eq(alternative)
expect(ab_user[experiment.finished_key]).to eq(true)
end
end
context "finished with metric name" do
before { Split.configuration.experiments = {} }
before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }
def should_finish_experiment(experiment_name, should_finish = true)
alts = Split.configuration.experiments[experiment_name][:alternatives]
experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)
alt_name = ab_user[experiment.key] = alts.first
alt = double("alternative")
expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)
expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)
if should_finish
expect(alt).to receive(:increment_completion).at_most(1).times
else
expect(alt).not_to receive(:increment_completion)
end
end
it "completes the test" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
metric: :my_metric
}
should_finish_experiment :my_experiment
ab_finished :my_metric
end
it "completes all relevant tests" do
Split.configuration.experiments = {
exp_1: {
alternatives: [ "1-1", "1-2" ],
metric: :my_metric
},
exp_2: {
alternatives: [ "2-1", "2-2" ],
metric: :another_metric
},
exp_3: {
alternatives: [ "3-1", "3-2" ],
metric: :my_metric
},
}
should_finish_experiment :exp_1
should_finish_experiment :exp_2, false
should_finish_experiment :exp_3
ab_finished :my_metric
end
it "passes reset option" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
resettable: false,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
it "passes through options" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric, reset: false
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
end
describe "conversions" do
it "should return a conversion rate for an alternative" do
alternative_name = ab_test("link_color", "blue", "red")
      previous_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(previous_conversion_rate).to eq(0.0)
      ab_finished("link_color")
      new_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(new_conversion_rate).to eq(1.0)
end
end
describe "active experiments" do
it "should show an active test" do
alternative = ab_test("def", "4", "5", "6")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show a finished test" do
alternative = ab_test("def", "4", "5", "6")
ab_finished("def", { reset: false })
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show an active test when an experiment is on a later version" do
experiment.reset
expect(experiment.version).to eq(1)
ab_test("link_color", "blue", "red")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "link_color"
end
it "should show versioned tests properly" do
10.times { experiment.reset }
alternative = ab_test(experiment.name, "blue", "red")
ab_finished(experiment.name, reset: false)
expect(experiment.version).to eq(10)
expect(active_experiments.count).to eq 1
expect(active_experiments).to eq({ "link_color" => alternative })
end
it "should show multiple tests" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
alternative = ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 2
expect(active_experiments["def"]).to eq alternative
expect(active_experiments["ghi"]).to eq another_alternative
end
it "should not show tests with winners" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6")
e.winner = "4"
ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "ghi"
expect(active_experiments.first[1]).to eq another_alternative
end
end
describe "when user is a robot" do
before(:each) do
@request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)")
end
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not create a experiment" do
ab_test("link_color", "blue", "red")
expect(Split::Experiment.new("link_color")).to be_a_new_record
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when providing custom ignore logic" do
context "using a proc to configure custom logic" do
before(:each) do
Split.configure do |c|
c.ignore_filter = proc { |request| true } # ignore everything
end
end
it "ignores the ab_test" do
ab_test("link_color", "blue", "red")
red_count = Split::Alternative.new("red", "link_color").participant_count
blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((red_count + blue_count)).to be(0)
end
end
end
shared_examples_for "a disabled test" do
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when ip address is ignored" do
context "individually" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.130")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it_behaves_like "a disabled test"
end
context "for a range" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.129")
Split.configure do |c|
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "using both a range and a specific value" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.128")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "when ignored other address" do
before do
@request = OpenStruct.new(ip: "1.1.1.1")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it "works as usual" do
alternative_name = ab_test("link_color", "red", "blue")
expect {
ab_finished("link_color")
}.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1)
end
end
end
describe "when user is previewing" do
before(:each) do
@request = OpenStruct.new(headers: { "x-purpose" => "preview" })
end
it_behaves_like "a disabled test"
end
describe "versioned experiments" do
it "should use version zero if no version is present" do
alternative_name = ab_test("link_color", "blue", "red")
expect(experiment.version).to eq(0)
expect(ab_user["link_color"]).to eq(alternative_name)
end
it "should save the version of the experiment to the session" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
end
it "should load the experiment even if the version is not 0" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
return_alternative_name = ab_test("link_color", "blue", "red")
expect(return_alternative_name).to eq(alternative_name)
end
it "should reset the session of a user on an older version of the experiment" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
expect(new_alternative.participant_count).to eq(1)
end
it "should cleanup old versions of experiments from the session" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
end
it "should only count completion of users on the current version" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
Split::Alternative.new(alternative_name, "link_color")
experiment.reset
expect(experiment.version).to eq(1)
ab_finished("link_color")
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.completed_count).to eq(0)
end
end
context "when redis is not available" do
before(:each) do
expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)
end
context "and db_failover config option is turned off" do
before(:each) do
Split.configure do |config|
config.db_failover = false
end
end
describe "ab_test" do
it "should raise an exception" do
expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "finished" do
it "should raise an exception" do
expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "disable split testing" do
before(:each) do
Split.configure do |config|
config.enabled = false
end
end
it "should not attempt to connect to redis" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should return control variable" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect { ab_finished("link_color") }.not_to raise_error
end
end
end
context "and db_failover config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover = true
end
end
describe "ab_test" do
it "should not raise an exception" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_test("link_color", "blue", "red")
end
it "should always use first alternative" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/blue")
end
context "and db_failover_allow_parameter_override config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover_allow_parameter_override = true
end
end
context "and given an override parameter" do
it "should use given override instead of the first alternative" do
@params = { "ab_test" => { "link_color" => "red" } }
expect(ab_test("link_color", "blue", "red")).to eq("red")
expect(ab_test("link_color", "blue", "red", "green")).to eq("red")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/red")
end
end
end
context "and preloaded config given" do
before do
Split.configuration.experiments[:link_color] = {
alternatives: [ "blue", "red" ],
}
end
it "uses first alternative" do
expect(ab_test(:link_color)).to eq("blue")
end
end
end
describe "finished" do
it "should not raise an exception" do
expect { ab_finished("link_color") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_finished("link_color")
end
end
end
end
context "with preloaded config" do
before { Split.configuration.experiments = {} }
it "pulls options from config file" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
ab_test :my_experiment
expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
end
it "can be called multiple times" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
5.times { ab_test :my_experiment }
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(experiment.goals).to eq([ "goal1", "goal2" ])
expect(experiment.participant_count).to eq(1)
end
it "accepts multiple goals" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: [ "goal1", "goal2", "goal3" ]
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
end
it "allow specifying goals to be optional" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ]
}
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([])
end
it "accepts multiple alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "second_opt", "third_opt" ],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
end
it "accepts probability on alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 67 },
{ name: "second_opt", percent: 10 },
{ name: "third_opt", percent: 23 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
end
it "accepts probability on some alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 34 },
"second_opt",
{ name: "third_opt", percent: 23 },
"fourth_opt",
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "allows name param without probability" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt" },
"second_opt",
{ name: "third_opt", percent: 64 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "fails gracefully if config is missing experiment" do
Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
end
it "fails gracefully if config is missing" do
expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "fails gracefully if config is missing alternatives" do
Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
expect { ab_test :my_experiment }.to raise_error(NoMethodError)
end
end
it "should handle multiple experiments correctly" do
experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
ab_test("link_color", "blue", "red")
ab_test("link_color2", "blue", "red")
ab_finished("link_color2")
experiment2.alternatives.each do |alt|
expect(alt.unfinished_count).to eq(0)
end
end
context "with goals" do
before do
@experiment = { "link_color" => ["purchase", "refund"] }
@alternatives = ["blue", "red"]
@experiment_name, @goals = normalize_metric(@experiment)
@goal1 = @goals[0]
@goal2 = @goals[1]
end
it "should normalize experiment" do
expect(@experiment_name).to eq("link_color")
expect(@goals).to eq(["purchase", "refund"])
end
describe "ab_test" do
it "should allow experiment goals interface as a single hash" do
ab_test(@experiment, *@alternatives)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.goals).to eq(["purchase", "refund"])
end
end
describe "ab_finished" do
before do
@alternative_name = ab_test(@experiment, *@alternatives)
end
it "should increment the counter for the specified-goal completed alternative" do
expect { ab_finished({ "link_color" => ["purchase"] }) }
.to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
.and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
end
end
end
end
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "when ignored other address" do
before do
@request = OpenStruct.new(ip: "1.1.1.1")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it "works as usual" do
alternative_name = ab_test("link_color", "red", "blue")
expect {
ab_finished("link_color")
}.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1)
end
end
end
describe "when user is previewing" do
before(:each) do
@request = OpenStruct.new(headers: { "x-purpose" => "preview" })
end
it_behaves_like "a disabled test"
end
describe "versioned experiments" do
it "should use version zero if no version is present" do
alternative_name = ab_test("link_color", "blue", "red")
expect(experiment.version).to eq(0)
expect(ab_user["link_color"]).to eq(alternative_name)
end
it "should save the version of the experiment to the session" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
end
it "should load the experiment even if the version is not 0" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
return_alternative_name = ab_test("link_color", "blue", "red")
expect(return_alternative_name).to eq(alternative_name)
end
it "should reset the session of a user on an older version of the experiment" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
expect(new_alternative.participant_count).to eq(1)
end
it "should cleanup old versions of experiments from the session" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
end
it "should only count completion of users on the current version" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
Split::Alternative.new(alternative_name, "link_color")
experiment.reset
expect(experiment.version).to eq(1)
ab_finished("link_color")
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.completed_count).to eq(0)
end
end
context "when redis is not available" do
before(:each) do
expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)
end
context "and db_failover config option is turned off" do
before(:each) do
Split.configure do |config|
config.db_failover = false
end
end
describe "ab_test" do
it "should raise an exception" do
expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "finished" do
it "should raise an exception" do
expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "disable split testing" do
before(:each) do
Split.configure do |config|
config.enabled = false
end
end
it "should not attempt to connect to redis" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should return control variable" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect { ab_finished("link_color") }.not_to raise_error
end
end
end
context "and db_failover config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover = true
end
end
describe "ab_test" do
it "should not raise an exception" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_test("link_color", "blue", "red")
end
it "should always use first alternative" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/blue")
end
context "and db_failover_allow_parameter_override config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover_allow_parameter_override = true
end
end
context "and given an override parameter" do
it "should use given override instead of the first alternative" do
@params = { "ab_test" => { "link_color" => "red" } }
expect(ab_test("link_color", "blue", "red")).to eq("red")
expect(ab_test("link_color", "blue", "red", "green")).to eq("red")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/red")
end
end
end
context "and preloaded config given" do
before do
Split.configuration.experiments[:link_color] = {
alternatives: [ "blue", "red" ],
}
end
it "uses first alternative" do
expect(ab_test(:link_color)).to eq("blue")
end
end
end
describe "finished" do
it "should not raise an exception" do
expect { ab_finished("link_color") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_finished("link_color")
end
end
end
end
context "with preloaded config" do
before { Split.configuration.experiments = {} }
it "pulls options from config file" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
ab_test :my_experiment
expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
end
it "can be called multiple times" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
5.times { ab_test :my_experiment }
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(experiment.goals).to eq([ "goal1", "goal2" ])
expect(experiment.participant_count).to eq(1)
end
it "accepts multiple goals" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: [ "goal1", "goal2", "goal3" ]
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
end
it "allow specifying goals to be optional" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ]
}
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([])
end
it "accepts multiple alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "second_opt", "third_opt" ],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
end
it "accepts probability on alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 67 },
{ name: "second_opt", percent: 10 },
{ name: "third_opt", percent: 23 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
end
it "accepts probability on some alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 34 },
"second_opt",
{ name: "third_opt", percent: 23 },
"fourth_opt",
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "allows name param without probability" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt" },
"second_opt",
{ name: "third_opt", percent: 64 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "fails gracefully if config is missing experiment" do
Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
end
it "fails gracefully if config is missing" do
expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "fails gracefully if config is missing alternatives" do
Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
expect { ab_test :my_experiment }.to raise_error(NoMethodError)
end
end
it "should handle multiple experiments correctly" do
experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
ab_test("link_color", "blue", "red")
ab_test("link_color2", "blue", "red")
ab_finished("link_color2")
experiment2.alternatives.each do |alt|
expect(alt.unfinished_count).to eq(0)
end
end
context "with goals" do
before do
@experiment = { "link_color" => ["purchase", "refund"] }
@alternatives = ["blue", "red"]
@experiment_name, @goals = normalize_metric(@experiment)
@goal1 = @goals[0]
@goal2 = @goals[1]
end
it "should normalize experiment" do
expect(@experiment_name).to eq("link_color")
expect(@goals).to eq(["purchase", "refund"])
end
describe "ab_test" do
it "should allow experiment goals interface as a single hash" do
ab_test(@experiment, *@alternatives)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.goals).to eq(["purchase", "refund"])
end
end
describe "ab_finished" do
before do
@alternative_name = ab_test(@experiment, *@alternatives)
end
it "should increment the counter for the specified-goal completed alternative" do
expect { ab_finished({ "link_color" => ["purchase"] }) }
.to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
.and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
end
end
end
end
<MSG> Merge pull request #62 from bhcarpenter/allow_override_param_without_redis
Allow parameter overrides, even without Redis.
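
A minimal sketch of the behavior this merge enables, with hypothetical method and argument names (illustrative only, not the gem's actual implementation; note the parameter shape has varied across versions — the nested form below matches the spec above): when Redis is unreachable, Split normally falls back to the first (control) alternative, but with db_failover_allow_parameter_override enabled an "ab_test" request parameter can still force a specific alternative.

# Hypothetical sketch; `failover_alternative` and its arguments are invented here.
def failover_alternative(experiment_name, control, params)
  override = params["ab_test"] && params["ab_test"][experiment_name]
  if Split.configuration.db_failover_allow_parameter_override && override
    override  # honor ?ab_test[<experiment>]=<alternative> even when Redis is down
  else
    control   # otherwise always serve the first (control) alternative
  end
end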
<DFF> @@ -410,6 +410,27 @@ describe Split::Helper do
"shared/#{alternative}"
end.should eq('shared/blue')
end
+
+ context 'and db_failover_allow_parameter_override config option is turned on' do
+ before(:each) do
+ Split.configure do |config|
+ config.db_failover_allow_parameter_override = true
+ end
+ end
+
+ context 'and given an override parameter' do
+ it 'should use given override instead of the first alternative' do
+ @params = {'link_color' => 'red'}
+ ab_test('link_color', 'blue', 'red').should eq('red')
+ ab_test('link_color', 'blue', 'red', 'green').should eq('red')
+ ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2).should eq('red')
+ ab_test('link_color', {'blue' => 0.8}, {'red' => 20}).should eq('red')
+ ab_test('link_color', 'blue', 'red') do |alternative|
+ "shared/#{alternative}"
+ end.should eq('shared/red')
+ end
+ end
+ end
end
describe 'finished' do
| 21 | Merge pull request #62 from bhcarpenter/allow_override_param_without_redis | 0 | .rb | rb | mit | splitrb/split |
10071065 | <NME> cookie_adapter_spec.rb
<BEF> # frozen_string_literal: true
require "spec_helper"
require "rack/test"
describe Split::Persistence::CookieAdapter do
let(:env) { Rack::MockRequest.env_for("http://example.com:8080/") }
let(:request) { Rack::Request.new(env) }
let(:response) { Rack::MockResponse.new(200, {}, "") }
let(:context) { double(request: request, response: response) }
subject { Split::Persistence::CookieAdapter.new(context) }
describe "#[] and #[]=" do
end
it "handles invalid JSON" do
context.request.cookies["split"] = "{\"foo\":2,"
expect(subject["my_key"]).to be_nil
subject["my_key"] = "my_value"
expect(subject["my_key"]).to eq("my_value")
end
it "ignores valid JSON of invalid type (integer)" do
context.request.cookies["split"] = "2"
expect(subject["my_key"]).to be_nil
subject["my_key"] = "my_value"
expect(subject["my_key"]).to eq("my_value")
end
it "ignores valid JSON of invalid type (array)" do
context.request.cookies["split"] = "[\"foo\", \"bar\"]"
expect(subject["my_key"]).to be_nil
subject["my_key"] = "my_value"
expect(subject["my_key"]).to eq("my_value")
end
end
expect(subject["my_key"]).to eq("my_value")
end
end
let(:context) { double(request: request, response: response, cookies: CookiesMock.new) }
include_examples "sets cookies correctly"
it "puts multiple experiments in a single cookie" do
subject["foo"] = "FOO"
subject["bar"] = "BAR"
expect(context.response.headers["Set-Cookie"]).to match(/\Asplit=%7B%22foo%22%3A%22FOO%22%2C%22bar%22%3A%22BAR%22%7D; path=\/; expires=[a-zA-Z]{3}, \d{2} [a-zA-Z]{3} \d{4} \d{2}:\d{2}:\d{2} [A-Z]{3}\Z/)
end
it "ensure other added cookies are not overriden" do
context.response.set_cookie "dummy", "wow"
subject["foo"] = "FOO"
expect(context.response.headers["Set-Cookie"]).to include("dummy=wow")
expect(context.response.headers["Set-Cookie"]).to include("split=")
end
end
context "when @context is an ActionController::Base" do
before :context do
require "rails"
require "action_controller/railtie"
end
let(:context) do
controller = controller_class.new
if controller.respond_to?(:set_request!)
controller.set_request!(ActionDispatch::Request.new({}))
else # Before rails 5.0
controller.send(:"request=", ActionDispatch::Request.new({}))
end
response = ActionDispatch::Response.new(200, {}, "").tap do |res|
res.request = controller.request
end
if controller.respond_to?(:set_response!)
controller.set_response!(response)
else # Before rails 5.0
controller.send(:set_response!, response)
end
controller
end
let(:controller_class) { Class.new(ActionController::Base) }
include_examples "sets cookies correctly"
it "puts multiple experiments in a single cookie" do
subject["foo"] = "FOO"
subject["bar"] = "BAR"
expect(subject.keys).to eq(["foo", "bar"])
expect(subject["foo"]).to eq("FOO")
expect(subject["bar"]).to eq("BAR")
cookie_jar = context.request.env["action_dispatch.cookies"]
expect(cookie_jar["split"]).to eq('{"foo":"FOO","bar":"BAR"}')
end
end
end
<MSG> Fix cookie header duplication (#522)
* Add failing spec for multiple Set-Cookie headers bug
* When ActionDispatch::Cookies is available from Rails tries to use it.
Otherwise parse current cookies set on response and replace it using Rack directly.
* Keep compatibility with Rack ~> 1.6.
New methods like Rack::Utils#make_delete_cookie_header were only added on 2.0.0
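
A hedged sketch of the write path this message describes — `write_cookie` is an assumed name, and `action_dispatch?` mirrors the predicate stubbed in the spec below; this is not necessarily the adapter's real internals:

# Illustrative only; assumes @context exposes #response and, under Rails, #cookies.
def write_cookie(key, value, expires)
  if action_dispatch?
    # Rails: let the ActionDispatch::Cookies jar merge Set-Cookie headers itself.
    @context.cookies[key] = { value: value, expires: expires, path: "/" }
  else
    # Plain Rack: Rack::Utils.set_cookie_header! exists in Rack ~> 1.6 and 2.x,
    # unlike Rack::Utils.make_delete_cookie_header, which arrived in 2.0.0.
    Rack::Utils.set_cookie_header!(@context.response.headers, key,
                                   value: value, expires: expires, path: "/")
  end
end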
<DFF> @@ -7,7 +7,7 @@ describe Split::Persistence::CookieAdapter do
let(:env) { Rack::MockRequest.env_for("http://example.com:8080/") }
let(:request) { Rack::Request.new(env) }
let(:response) { Rack::MockResponse.new(200, {}, "") }
- let(:context) { double(request: request, response: response) }
+ let(:context) { double(request: request, response: response, cookies: CookiesMock.new) }
subject { Split::Persistence::CookieAdapter.new(context) }
describe "#[] and #[]=" do
@@ -40,4 +40,23 @@ describe Split::Persistence::CookieAdapter do
expect(subject["my_key"]).to eq("my_value")
end
+ it "puts multiple experiments in a single cookie" do
+ subject["foo"] = "FOO"
+ subject["bar"] = "BAR"
+ expect(context.response.headers["Set-Cookie"]).to match(/\Asplit=%7B%22foo%22%3A%22FOO%22%2C%22bar%22%3A%22BAR%22%7D; path=\/; expires=[a-zA-Z]{3}, \d{2} [a-zA-Z]{3} \d{4} \d{2}:\d{2}:\d{2} -0000\Z/)
+ end
+
+ it "ensure other added cookies are not overriden" do
+ context.response.set_cookie 'dummy', 'wow'
+ subject["foo"] = "FOO"
+ expect(context.response.headers["Set-Cookie"]).to include("dummy=wow")
+ expect(context.response.headers["Set-Cookie"]).to include("split=")
+ end
+
+ it "uses ActionDispatch::Cookie when available for cookie writing" do
+ allow(subject).to receive(:action_dispatch?).and_return(true)
+ subject["foo"] = "FOO"
+ expect(subject['foo']).to eq('FOO')
+ end
+
end
| 20 | Fix cookie header duplication (#522) | 1 | .rb | rb | mit | splitrb/split |
10071068 | <NME> configuration.rb
<BEF> # frozen_string_literal: true
module Split
class Configuration
attr_accessor :ignore_ip_addresses
attr_accessor :ignore_filter
attr_accessor :db_failover
attr_accessor :db_failover_on_db_error
attr_accessor :db_failover_allow_parameter_override
attr_accessor :allow_multiple_experiments
attr_accessor :enabled
attr_accessor :persistence
attr_accessor :persistence_cookie_length
attr_accessor :persistence_cookie_domain
attr_accessor :algorithm
attr_accessor :store_override
attr_accessor :start_manually
attr_accessor :reset_manually
attr_accessor :on_trial
attr_accessor :on_trial_choose
attr_accessor :on_trial_complete
attr_accessor :on_experiment_reset
attr_accessor :on_experiment_delete
attr_accessor :on_before_experiment_reset
attr_accessor :on_experiment_winner_choose
attr_accessor :on_before_experiment_delete
attr_accessor :include_rails_helper
attr_accessor :beta_probability_simulations
attr_accessor :winning_alternative_recalculation_interval
attr_accessor :redis
attr_accessor :dashboard_pagination_default_per_page
attr_accessor :cache
attr_reader :experiments
    def bots
      @bots ||= {
        # Indexers
        "AdsBot-Google" => "Google Adwords",
        "Baidu" => "Chinese search engine",
        "Baiduspider" => "Chinese search engine",
        "bingbot" => "Microsoft bing bot",
"Butterfly" => "Topsy Labs",
"Gigabot" => "Gigabot spider",
"Googlebot" => "Google spider",
"MJ12bot" => "Majestic-12 spider",
"msnbot" => "Microsoft bot",
"rogerbot" => "SeoMoz spider",
"PaperLiBot" => "PaperLi is another content curation service",
"Slurp" => "Yahoo spider",
"Sogou" => "Chinese search engine",
"spider" => "generic web spider",
"UnwindFetchor" => "Gnip crawler",
"WordPress" => "WordPress spider",
"YandexAccessibilityBot" => "Yandex accessibility spider",
"YandexBot" => "Yandex spider",
"YandexMobileBot" => "Yandex mobile spider",
"ZIBB" => "ZIBB spider",
# HTTP libraries
"Apache-HttpClient" => "Java http library",
"AppEngine-Google" => "Google App Engine",
"curl" => "curl unix CLI http client",
"ColdFusion" => "ColdFusion http library",
"EventMachine HttpClient" => "Ruby http library",
"Go http package" => "Go http library",
"Go-http-client" => "Go http library",
"Java" => "Generic Java http library",
"libwww-perl" => "Perl client-server library loved by script kids",
"lwp-trivial" => "Another Perl library loved by script kids",
"Python-urllib" => "Python http library",
"PycURL" => "Python http library",
"Test Certificate Info" => "C http library?",
"Typhoeus" => "Ruby http library",
"Wget" => "wget unix CLI http client",
# URL expanders / previewers
"awe.sm" => "Awe.sm URL expander",
"bitlybot" => "bit.ly bot",
"[email protected]" => "Linkfluence bot",
"facebookexternalhit" => "facebook bot",
"Facebot" => "Facebook crawler",
"Feedfetcher-Google" => "Google Feedfetcher",
"https://developers.google.com/+/web/snippet" => "Google+ Snippet Fetcher",
"LinkedInBot" => "LinkedIn bot",
"LongURL" => "URL expander service",
"NING" => "NING - Yet Another Twitter Swarmer",
"Pinterestbot" => "Pinterest Bot",
"redditbot" => "Reddit Bot",
"ShortLinkTranslate" => "Link shortener",
"Slackbot" => "Slackbot link expander",
"TweetmemeBot" => "TweetMeMe Crawler",
"Twitterbot" => "Twitter URL expander",
"UnwindFetch" => "Gnip URL expander",
"vkShare" => "VKontake Sharer",
# Uptime monitoring
"check_http" => "Nagios monitor",
"GoogleStackdriverMonitoring" => "Google Cloud monitor",
"NewRelicPinger" => "NewRelic monitor",
"Panopta" => "Monitoring service",
"Pingdom" => "Pingdom monitoring",
"SiteUptime" => "Site monitoring services",
"UptimeRobot" => "Monitoring service",
# ???
"DigitalPersona Fingerprint Software" => "HP Fingerprint scanner",
"ShowyouBot" => "Showyou iOS app spider",
"ZyBorg" => "Zyborg? Hmmm....",
"ELB-HealthChecker" => "ELB Health Check"
}
end
def experiments=(experiments)
raise InvalidExperimentsFormatError.new("Experiments must be a Hash") unless experiments.respond_to?(:keys)
@experiments = experiments
end
def disabled?
!enabled
end
def experiment_for(name)
if normalized_experiments
# TODO symbols
normalized_experiments[name.to_sym]
end
end
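    # Lazily builds a Hash mapping each metric name (as a Symbol) to the
    # experiments whose config declares it under the :metric key.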
def metrics
return @metrics if defined?(@metrics)
@metrics = {}
if self.experiments
self.experiments.each do |key, value|
metrics = value_for(value, :metric) rescue nil
Array(metrics).each do |metric_name|
if metric_name
@metrics[metric_name.to_sym] ||= []
@metrics[metric_name.to_sym] << Split::Experiment.new(key)
end
end
end
end
@metrics
end
def normalized_experiments
return nil if @experiments.nil?
experiment_config = {}
@experiments.keys.each do |name|
experiment_config[name.to_sym] = {}
end
@experiments.each do |experiment_name, settings|
alternatives = if (alts = value_for(settings, :alternatives))
normalize_alternatives(alts)
end
experiment_data = {
alternatives: alternatives,
goals: value_for(settings, :goals),
metadata: value_for(settings, :metadata),
algorithm: value_for(settings, :algorithm),
resettable: value_for(settings, :resettable)
}
experiment_data.each do |name, value|
experiment_config[experiment_name.to_sym][name] = value if value != nil
end
end
experiment_config
end
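    # Turns mixed alternative declarations into [control, rest] with float
    # weights: an explicit percent becomes percent / 100.0, and whatever
    # probability remains is split evenly among alternatives declared without one.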
def normalize_alternatives(alternatives)
given_probability, num_with_probability = alternatives.inject([0, 0]) do |a, v|
p, n = a
if percent = value_for(v, :percent)
[p + percent, n + 1]
else
a
end
end
num_without_probability = alternatives.length - num_with_probability
unassigned_probability = ((100.0 - given_probability) / num_without_probability / 100.0)
if num_with_probability.nonzero?
alternatives = alternatives.map do |v|
if (name = value_for(v, :name)) && (percent = value_for(v, :percent))
{ name => percent / 100.0 }
elsif name = value_for(v, :name)
{ name => unassigned_probability }
else
{ v => unassigned_probability }
end
end
[alternatives.shift, alternatives]
else
alternatives = alternatives.dup
[alternatives.shift, alternatives]
end
end
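    # Whole-word match against any known bot token, or an effectively empty
    # user agent (nothing but non-word characters); case-insensitive.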
def robot_regex
@robot_regex ||= /\b(?:#{escaped_bots.join('|')})\b|\A\W*\z/i
end
def initialize
@ignore_ip_addresses = []
@ignore_filter = proc { |request| is_robot? || is_ignored_ip_address? }
@db_failover = false
@db_failover_on_db_error = proc { |error| } # e.g. use Rails logger here
@on_experiment_reset = proc { |experiment| }
@on_experiment_delete = proc { |experiment| }
@on_before_experiment_reset = proc { |experiment| }
@on_before_experiment_delete = proc { |experiment| }
@on_experiment_winner_choose = proc { |experiment| }
@db_failover_allow_parameter_override = false
@allow_multiple_experiments = false
@enabled = true
@experiments = {}
@persistence = Split::Persistence::SessionAdapter
@persistence_cookie_length = 31536000 # One year from now
@persistence_cookie_domain = nil
@algorithm = Split::Algorithms::WeightedSample
@include_rails_helper = true
@beta_probability_simulations = 10000
@winning_alternative_recalculation_interval = 60 * 60 * 24 # 1 day
@redis = ENV.fetch(ENV.fetch("REDIS_PROVIDER", "REDIS_URL"), "redis://localhost:6379")
@dashboard_pagination_default_per_page = 10
end
private
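      # Looks up a hash value whether the key was given as a String or a Symbol;
      # returns nil when `hash` is not actually a Hash.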
def value_for(hash, key)
if hash.kind_of?(Hash)
hash.has_key?(key.to_s) ? hash[key.to_s] : hash[key.to_sym]
end
end
def escaped_bots
bots.map { |key, _| Regexp.escape(key) }
end
end
end
<MSG> Added another bot.
<DFF> @@ -36,9 +36,10 @@ module Split
'Slurp' => 'Yahoo spider',
'Sogou' => 'Chinese search engine',
'spider' => 'generic web spider',
+ 'UnwindFetchor' => 'Gnip crawler'
'WordPress' => 'WordPress spider',
- 'ZIBB' => 'ZIBB spider',
'YandexBot' => 'Yandex spider',
+ 'ZIBB' => 'ZIBB spider',
# HTTP libraries
'Apache-HttpClient' => 'Java http library',
| 2 | Added another bot. | 1 | .rb | rb | mit | splitrb/split |
10071071 | <NME> markup.ts
<BEF> import { strictEqual as equal } from 'assert';
import parse from '../src/markup';
import resolveConfig from '../src/config';
import stringify from './assets/stringify';
const defaultConfig = resolveConfig({ cache: {} });
function expand(abbr: string, config = defaultConfig): string {
return stringify(parse(abbr, config));
}
describe('Markup abbreviations', () => {
it('implicit tags', () => {
equal(expand('.'), '<div class=""></div>');
equal(expand('.foo>.bar'), '<div class="foo"><div class="bar"></div></div>');
equal(expand('p.foo>.bar'), '<p class="foo"><span class="bar"></span></p>');
equal(expand('ul>.item*2'), '<ul><li*2@0 class="item"></li><li*2@1 class="item"></li></ul>');
equal(expand('table>.row>.cell'), '<table><tr class="row"><td class="cell"></td></tr></table>');
equal(expand('{test}'), 'test');
equal(expand('.{test}'), '<div class="">test</div>');
equal(expand('ul>.item$*2'), '<ul><li*2@0 class="item1"></li><li*2@1 class="item2"></li></ul>');
});
it('XSL', () => {
const config = resolveConfig({ syntax: 'xsl' });
equal(expand('xsl:variable[select]', config), '<xsl:variable select=""></xsl:variable>');
equal(expand('xsl:with-param[select]', config), '<xsl:with-param select=""></xsl:with-param>');
equal(expand('xsl:variable[select]>div', config), '<xsl:variable><div></div></xsl:variable>');
equal(expand('xsl:with-param[select]{foo}', config), '<xsl:with-param>foo</xsl:with-param>');
});
describe('BEM transform', () => {
const config = resolveConfig({
options: { 'bem.enabled': true }
});
it('modifiers', () => {
equal(expand('div.b_m', config), '<div class="b b_m"></div>');
equal(expand('div.b._m', config), '<div class="b b_m"></div>');
equal(expand('div.b_m1._m2', config), '<div class="b b_m1 b_m2"></div>');
equal(expand('div.b>div._m', config), '<div class="b"><div class="b b_m"></div></div>');
equal(expand('div.b>div._m1>div._m2', config), '<div class="b"><div class="b b_m1"><div class="b b_m2"></div></div></div>');
// classnames with -
equal(expand('div.b>div._m1-m2', config), '<div class="b"><div class="b b_m1-m2"></div></div>');
});
it('elements', () => {
equal(expand('div.b>div.-e', config), '<div class="b"><div class="b__e"></div></div>');
equal(expand('div.b>div.---e', config), '<div class="b"><div class="b__e"></div></div>');
equal(expand('div.b>div.-e>div.-e', config), '<div class="b"><div class="b__e"><div class="b__e"></div></div></div>');
equal(expand('div', config), '<div></div>', 'Fixes bug with empty class');
// get block name from proper ancestor
equal(expand('div.b1>div.b2_m1>div.-e1+div.---e2_m2', config),
'<div class="b1"><div class="b2 b2_m1"><div class="b2__e1"></div><div class="b1__e2 b1__e2_m2"></div></div></div>');
// class names with -
equal(expand('div.b>div.-m1-m2', config), '<div class="b"><div class="b__m1-m2"></div></div>');
// class names with _
equal(expand('div.b_m_o', config), '<div class="b b_m_o"></div>');
});
it('customize modifier', () => {
const localConfig = resolveConfig({
options: {
'bem.enabled': true,
'bem.element': '-',
'bem.modifier': '__'
}
});
equal(expand('div.b_m', localConfig), '<div class="b b__m"></div>');
equal(expand('div.b._m', localConfig), '<div class="b b__m"></div>');
});
it('multiple classes after modifier/element', () => {
equal(expand('div.b_m.c', config), '<div class="b b_m c"></div>');
equal(expand('div.b>div._m.c', config), '<div class="b"><div class="b b_m c"></div></div>');
equal(expand('div.b>div.-m.c', config), '<div class="b"><div class="b__m c"></div></div>');
});
});
});
<MSG> [bem] Get block name from parent context
<DFF> @@ -79,5 +79,13 @@ describe('Markup abbreviations', () => {
equal(expand('div.b>div._m.c', config), '<div class="b"><div class="b b_m c"></div></div>');
equal(expand('div.b>div.-m.c', config), '<div class="b"><div class="b__m c"></div></div>');
});
+
+ it('parent context', () => {
+ // Get block name from context
+ equal(expand('.-e_m', resolveConfig({
+ context: { name: 'div', attributes: { class: 'bl' } },
+ options: { 'bem.enabled': true }
+ })), '<div class="bl__e bl__e_m"></div>');
+ });
});
});
| 8 | [bem] Get block name from parent context | 0 | .ts | ts | mit | emmetio/emmet |
10071073 | <NME> experiment.rb
<BEF> # frozen_string_literal: true
module Split
class Experiment
attr_accessor :name
attr_accessor :goals
attr_accessor :alternative_probabilities
attr_accessor :metadata
attr_reader :alternatives
attr_reader :resettable
DEFAULT_OPTIONS = {
resettable: true
}
def self.find(name)
Split.cache(:experiments, name) do
return unless Split.redis.exists?(name)
Experiment.new(name).tap { |exp| exp.load_from_redis }
end
end
def initialize(name, options = {})
options = DEFAULT_OPTIONS.merge(options)
@name = name.to_s
extract_alternatives_from_options(options)
end
def self.finished_key(key)
"#{key}:finished"
end
def set_alternatives_and_options(options)
options_with_defaults = DEFAULT_OPTIONS.merge(
options.reject { |k, v| v.nil? }
)
self.alternatives = options_with_defaults[:alternatives]
self.goals = options_with_defaults[:goals]
self.resettable = options_with_defaults[:resettable]
self.algorithm = options_with_defaults[:algorithm]
self.metadata = options_with_defaults[:metadata]
end
def extract_alternatives_from_options(options)
alts = options[:alternatives] || []
if alts.length == 1
if alts[0].is_a? Hash
alts = alts[0].map { |k, v| { k => v } }
end
end
if alts.empty?
exp_config = Split.configuration.experiment_for(name)
if exp_config
alts = load_alternatives_from_configuration
options[:goals] = Split::GoalsCollection.new(@name).load_from_configuration
options[:metadata] = load_metadata_from_configuration
options[:resettable] = exp_config[:resettable]
options[:algorithm] = exp_config[:algorithm]
end
end
options[:alternatives] = alts
set_alternatives_and_options(options)
# calculate probability that each alternative is the winner
@alternative_probabilities = {}
alts
end
def save
validate!
if new_record?
start unless Split.configuration.start_manually
persist_experiment_configuration
elsif experiment_configuration_has_changed?
reset unless Split.configuration.reset_manually
persist_experiment_configuration
end
redis.hmset(experiment_config_key, :resettable, resettable.to_s,
:algorithm, algorithm.to_s)
self
end
def validate!
if @alternatives.empty? && Split.configuration.experiment_for(@name).nil?
raise ExperimentNotFound.new("Experiment #{@name} not found")
end
@alternatives.each { |a| a.validate! }
goals_collection.validate!
end
def new_record?
ExperimentCatalog.find(name).nil?
end
def ==(obj)
self.name == obj.name
end
def [](name)
alternatives.find { |a| a.name == name }
end
def algorithm
@algorithm ||= Split.configuration.algorithm
end
def algorithm=(algorithm)
@algorithm = algorithm.is_a?(String) ? algorithm.constantize : algorithm
end
def resettable=(resettable)
@resettable = resettable.is_a?(String) ? resettable == "true" : resettable
end
def alternatives=(alts)
@alternatives = alts.map do |alternative|
if alternative.kind_of?(Split::Alternative)
alternative
else
Split::Alternative.new(alternative, @name)
end
end
end
def winner
Split.cache(:experiment_winner, name) do
experiment_winner = redis.hget(:experiment_winner, name)
if experiment_winner
Split::Alternative.new(experiment_winner, name)
else
nil
end
end
end
def has_winner?
return @has_winner if defined? @has_winner
@has_winner = !winner.nil?
end
def winner=(winner_name)
redis.hset(:experiment_winner, name, winner_name.to_s)
@has_winner = true
Split.configuration.on_experiment_winner_choose.call(self)
end
def participant_count
alternatives.inject(0) { |sum, a| sum + a.participant_count }
end
def control
alternatives.first
end
def reset_winner
redis.hdel(:experiment_winner, name)
@has_winner = false
Split::Cache.clear_key(@name)
end
def start
redis.hset(:experiment_start_times, @name, Time.now.to_i)
end
def start_time
Split.cache(:experiment_start_times, @name) do
t = redis.hget(:experiment_start_times, @name)
if t
# Check if stored time is an integer
if t =~ /^[-+]?[0-9]+$/
Time.at(t.to_i)
else
Time.parse(t)
end
end
end
end
def next_alternative
winner || random_alternative
end
def random_alternative
if alternatives.length > 1
algorithm.choose_alternative(self)
else
alternatives.first
end
end
def version
@version ||= (redis.get("#{name}:version").to_i || 0)
end
def increment_version
@version = redis.incr("#{name}:version")
end
def key
if version.to_i > 0
"#{name}:#{version}"
else
name
end
end
def goals_key
"#{name}:goals"
end
def finished_key
self.class.finished_key(key)
end
def metadata_key
"#{name}:metadata"
end
def resettable?
resettable
end
def reset
Split.configuration.on_before_experiment_reset.call(self)
Split::Cache.clear_key(@name)
alternatives.each(&:reset)
reset_winner
Split.configuration.on_experiment_reset.call(self)
increment_version
end
def delete
Split.configuration.on_before_experiment_delete.call(self)
if Split.configuration.start_manually
redis.hdel(:experiment_start_times, @name)
end
reset_winner
redis.srem(:experiments, name)
remove_experiment_cohorting
remove_experiment_configuration
Split.configuration.on_experiment_delete.call(self)
increment_version
end
def delete_metadata
redis.del(metadata_key)
end
def load_from_redis
exp_config = redis.hgetall(experiment_config_key)
options = {
resettable: exp_config["resettable"],
algorithm: exp_config["algorithm"],
alternatives: load_alternatives_from_redis,
goals: Split::GoalsCollection.new(@name).load_from_redis,
metadata: load_metadata_from_redis
}
set_alternatives_and_options(options)
end
def calc_winning_alternatives
# Cache the winning alternatives so we recalculate them once per the specified interval.
intervals_since_epoch =
Time.now.utc.to_i / Split.configuration.winning_alternative_recalculation_interval
if self.calc_time != intervals_since_epoch
if goals.empty?
self.estimate_winning_alternative
else
goals.each do |goal|
self.estimate_winning_alternative(goal)
end
end
self.calc_time = intervals_since_epoch
self.save
end
end
def estimate_winning_alternative(goal = nil)
# initialize a hash of beta distributions based on the alternatives' conversion rates
beta_params = calc_beta_params(goal)
winning_alternatives = []
Split.configuration.beta_probability_simulations.times do
# calculate simulated conversion rates from the beta distributions
simulated_cr_hash = calc_simulated_conversion_rates(beta_params)
winning_alternative = find_simulated_winner(simulated_cr_hash)
# push the winning pair to the winning_alternatives array
winning_alternatives.push(winning_alternative)
end
winning_counts = count_simulated_wins(winning_alternatives)
@alternative_probabilities = calc_alternative_probabilities(winning_counts, Split.configuration.beta_probability_simulations)
write_to_alternatives(goal)
self.save
end
def write_to_alternatives(goal = nil)
alternatives.each do |alternative|
alternative.set_p_winner(@alternative_probabilities[alternative], goal)
end
end
def calc_alternative_probabilities(winning_counts, number_of_simulations)
alternative_probabilities = {}
winning_counts.each do |alternative, wins|
alternative_probabilities[alternative] = wins / number_of_simulations.to_f
end
alternative_probabilities
end
def count_simulated_wins(winning_alternatives)
# initialize a hash to keep track of winning alternative in simulations
winning_counts = {}
alternatives.each do |alternative|
winning_counts[alternative] = 0
end
# count number of times each alternative won, calculate probabilities, place in hash
winning_alternatives.each do |alternative|
winning_counts[alternative] += 1
end
winning_counts
end
def find_simulated_winner(simulated_cr_hash)
# figure out which alternative had the highest simulated conversion rate
winning_pair = ["", 0.0]
simulated_cr_hash.each do |alternative, rate|
if rate > winning_pair[1]
winning_pair = [alternative, rate]
end
end
winner = winning_pair[0]
winner
end
def calc_simulated_conversion_rates(beta_params)
simulated_cr_hash = {}
# create a hash which has the conversion rate pulled from each alternative's beta distribution
beta_params.each do |alternative, params|
alpha = params[0]
beta = params[1]
simulated_conversion_rate = Split::Algorithms.beta_distribution_rng(alpha, beta)
simulated_cr_hash[alternative] = simulated_conversion_rate
end
simulated_cr_hash
end
def calc_beta_params(goal = nil)
beta_params = {}
alternatives.each do |alternative|
conversions = goal.nil? ? alternative.completed_count : alternative.completed_count(goal)
alpha = 1 + conversions
beta = 1 + alternative.participant_count - conversions
params = [alpha, beta]
beta_params[alternative] = params
end
beta_params
end
def calc_time=(time)
redis.hset(experiment_config_key, :calc_time, time)
end
def calc_time
redis.hget(experiment_config_key, :calc_time).to_i
end
def jstring(goal = nil)
js_id = if goal.nil?
name
else
name + "-" + goal
end
js_id.gsub("/", "--")
end
def cohorting_disabled?
@cohorting_disabled ||= begin
value = redis.hget(experiment_config_key, :cohorting)
value.nil? ? false : value.downcase == "true"
end
end
def disable_cohorting
@cohorting_disabled = true
redis.hset(experiment_config_key, :cohorting, true.to_s)
end
def enable_cohorting
@cohorting_disabled = false
redis.hset(experiment_config_key, :cohorting, false.to_s)
end
protected
def experiment_config_key
"experiment_configurations/#{@name}"
end
def load_metadata_from_configuration
Split.configuration.experiment_for(@name)[:metadata]
end
def load_metadata_from_redis
meta = redis.get(metadata_key)
JSON.parse(meta) unless meta.nil?
end
def load_alternatives_from_redis
alternatives = case redis.type(@name)
when 'set' # convert legacy sets to lists
alts = redis.smembers(@name)
redis.del(@name)
alts.reverse.each {|a| redis.lpush(@name, a) }
redis.lrange(@name, 0, -1)
else
redis.lrange(@name, 0, -1)
end
      alternatives.map do |alt|
        alt = begin
          JSON.parse(alt)
        rescue
          alt
        end
        Split::Alternative.new(alt, @name)
      end
    end
private
def redis
Split.redis
end
def redis_interface
RedisInterface.new
end
def persist_experiment_configuration
redis_interface.add_to_set(:experiments, name)
redis_interface.persist_list(name, @alternatives.map { |alt| { alt.name => alt.weight }.to_json })
goals_collection.save
if @metadata
redis.set(metadata_key, @metadata.to_json)
else
delete_metadata
end
end
def remove_experiment_configuration
@alternatives.each(&:delete)
goals_collection.delete
delete_metadata
redis.del(@name)
end
def experiment_configuration_has_changed?
existing_experiment = Experiment.find(@name)
existing_experiment.alternatives.map(&:to_s) != @alternatives.map(&:to_s) ||
existing_experiment.goals != @goals ||
existing_experiment.metadata != @metadata
end
def goals_collection
Split::GoalsCollection.new(@name, @goals)
end
def remove_experiment_cohorting
@cohorting_disabled = false
redis.hdel(experiment_config_key, :cohorting)
end
end
end
<MSG> Merge pull request #639 from splitrb/remove-old-alternatives-set-compat-code
Remove 'set' parsing for alternatives. Sets were used as storage and deprecated on 0.x
<DFF> @@ -431,15 +431,7 @@ module Split
end
def load_alternatives_from_redis
- alternatives = case redis.type(@name)
- when 'set' # convert legacy sets to lists
- alts = redis.smembers(@name)
- redis.del(@name)
- alts.reverse.each {|a| redis.lpush(@name, a) }
- redis.lrange(@name, 0, -1)
- else
- redis.lrange(@name, 0, -1)
- end
+ alternatives = redis.lrange(@name, 0, -1)
alternatives.map do |alt|
alt = begin
JSON.parse(alt)
| 1 | Merge pull request #639 from splitrb/remove-old-alternatives-set-compat-code | 9 | .rb | rb | mit | splitrb/split |
10071076 | <NME> alternative.rb
<BEF> # frozen_string_literal: true
module Split
class Alternative
attr_accessor :name
attr_accessor :experiment_name
attr_accessor :weight
attr_accessor :recorded_info
def initialize(name, experiment_name)
@experiment_name = experiment_name
if Hash === name
@name = name.keys.first
@weight = name.values.first
      else
        @name = name
@weight = 1
end
p_winner = 0.0
end
    def to_s
name
end
def goals
self.experiment.goals
end
def p_winner(goal = nil)
field = set_prob_field(goal)
@p_winner = Split.redis.hget(key, field).to_f
end
def set_p_winner(prob, goal = nil)
field = set_prob_field(goal)
Split.redis.hset(key, field, prob.to_f)
end
def participant_count
Split.redis.hget(key, "participant_count").to_i
end
def participant_count=(count)
Split.redis.hset(key, "participant_count", count.to_i)
end
def completed_count(goal = nil)
field = set_field(goal)
Split.redis.hget(key, field).to_i
end
def all_completed_count
if goals.empty?
completed_count
else
goals.inject(completed_count) do |sum, g|
sum + completed_count(g)
end
end
end
def unfinished_count
participant_count - all_completed_count
end
def set_field(goal)
field = "completed_count"
field += ":" + goal unless goal.nil?
field
end
def set_prob_field(goal)
field = "p_winner"
field += ":" + goal unless goal.nil?
field
end
def set_completed_count(count, goal = nil)
field = set_field(goal)
Split.redis.hset(key, field, count.to_i)
end
def increment_participation
Split.redis.hincrby key, "participant_count", 1
end
def increment_completion(goal = nil)
field = set_field(goal)
Split.redis.hincrby(key, field, 1)
end
def control?
experiment.control.name == self.name
end
def conversion_rate(goal = nil)
return 0 if participant_count.zero?
(completed_count(goal).to_f)/participant_count.to_f
end
def experiment
Split::ExperimentCatalog.find(experiment_name)
end
def z_score(goal = nil)
# p_a = Pa = proportion of users who converted within the experiment split (conversion rate)
# p_c = Pc = proportion of users who converted within the control split (conversion rate)
# n_a = Na = the number of impressions within the experiment split
# n_c = Nc = the number of impressions within the control split
control = experiment.control
alternative = self
return "N/A" if control.name == alternative.name
p_a = alternative.conversion_rate(goal)
p_c = control.conversion_rate(goal)
n_a = alternative.participant_count
n_c = control.participant_count
# can't calculate zscore for P(x) > 1
return "N/A" if p_a > 1 || p_c > 1
Split::Zscore.calculate(p_a, n_a, p_c, n_c)
end
def extra_info
data = Split.redis.hget(key, "recorded_info")
if data && data.length > 1
begin
JSON.parse(data)
rescue
{}
end
else
{}
end
end
def record_extra_info(k, value = 1)
@recorded_info = self.extra_info || {}
if value.kind_of?(Numeric)
@recorded_info[k] ||= 0
@recorded_info[k] += value
else
@recorded_info[k] = value
end
Split.redis.hset key, "recorded_info", (@recorded_info || {}).to_json
end
def save
Split.redis.hsetnx key, "participant_count", 0
Split.redis.hsetnx key, "completed_count", 0
Split.redis.hsetnx key, "p_winner", p_winner
Split.redis.hsetnx key, "recorded_info", (@recorded_info || {}).to_json
end
def validate!
unless String === @name || hash_with_correct_values?(@name)
raise ArgumentError, "Alternative must be a string"
end
end
def reset
Split.redis.hmset key, "participant_count", 0, "completed_count", 0, "recorded_info", ""
unless goals.empty?
goals.each do |g|
field = "completed_count:#{g}"
Split.redis.hset key, field, 0
end
end
end
def delete
Split.redis.del(key)
end
private
def hash_with_correct_values?(name)
Hash === name && String === name.keys.first && Float(name.values.first) rescue false
end
def key
"#{experiment_name}:#{name}"
end
end
end
<MSG> Assign variable @p_winner correctly
<DFF> @@ -15,7 +15,7 @@ module Split
@name = name
@weight = 1
end
- p_winner = 0.0
+ @p_winner = 0.0
end
def to_s
| 1 | Assign variable @p_winner correctly | 1 | .rb | rb | mit | splitrb/split |
10071079 | <NME> README.md
<BEF> # [Split](https://libraries.io/rubygems/split)
[](http://badge.fury.io/rb/split)

[](https://codeclimate.com/github/splitrb/split)
[](https://codeclimate.com/github/splitrb/split/coverage)
[](https://github.com/RichardLitt/standard-readme)
[](https://www.codetriage.com/splitrb/split)
> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split
Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.
Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.
Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.
## Install
### Requirements
Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.
If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3).
Split uses Redis as a datastore.
Split only supports Redis 4.0 or greater.
If you're on OS X, Homebrew is the simplest way to install Redis:
```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```
You now have a Redis daemon running on port `6379`.
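Split talks to Redis through `Split.redis`. If your Redis instance isn't on the default host and port, you can point Split at it explicitly. A minimal sketch (the `REDIS_URL` environment variable is a common convention assumed here, not something Split requires):

```ruby
# Point Split at a specific Redis instance. Any Redis URL string works;
# REDIS_URL is just an assumed convention for this example.
Split.redis = ENV.fetch("REDIS_URL", "redis://localhost:6379")
```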
### Setup
```bash
gem install split
```
#### Rails
Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.
#### Sinatra
To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:
```ruby
require 'split'
class MySinatraApp < Sinatra::Base
enable :sessions
helpers Split::Helper
  get '/' do
    ...
  end
end
```
## Usage
To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.
`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.
It can be used to render different templates, show different text or any other case based logic.
`ab_finished` is used to mark the completion of an experiment, i.e. a conversion.
Example: View
```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
<%= image_tag(button_file, alt: "Login!") %>
<% end %>
```
Example: Controller
```ruby
def register_new_user
# See what level of free points maximizes users' decision to buy replacement points.
@starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```
Example: Conversion tracking (in a controller!)
```ruby
def buy_new_points
# some business logic
ab_finished(:new_user_free_points)
end
```
Example: Conversion tracking (in a view)
```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```
You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
## Statistical Validity
Split has two options for you to use to determine which alternative is the best.
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not tell you which alternative is best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.
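For intuition, the significance test boils down to a two-proportion z statistic. Here is a minimal sketch using unpooled standard errors; it is illustrative only, not necessarily Split's exact internals:

```ruby
# Two-proportion z statistic (unpooled standard error) -- a sketch for
# intuition, not Split's exact implementation.
def z_score(p_alt, n_alt, p_control, n_control)
  standard_error = Math.sqrt(
    p_alt * (1 - p_alt) / n_alt +
    p_control * (1 - p_control) / n_control
  )
  (p_alt - p_control) / standard_error
end

z_score(0.12, 1000, 0.10, 1000) # => ~1.43, short of the ~1.65 needed for 90% confidence
```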
As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).
[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.
The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.
Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).
```ruby
Split.configure do |config|
config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```
## Extras
### Weighted alternatives
Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.
To do this you can pass a weight with each alternative in the following ways:
```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
ab_test(:homepage_design, {'Old' => 9}, 'New')
```
This will only show the new alternative to visitors 1 in 10 times; the default weight for an alternative is 1.
### Overriding alternatives
For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the URL.
If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:
http://myawesomesite.com?ab_test[button_color]=red
will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.
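For example, to persist overridden alternatives and have them count towards results (a minimal sketch using the `store_override` option named above):

```ruby
Split.configure do |config|
  config.store_override = true
end
```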
In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.
http://myawesomesite.com?SPLIT_DISABLE=true
It is not required to send `SPLIT_DISABLE=false` to activate Split.
### Rspec Helper
To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:
```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
# Force a specific experiment alternative to always be returned:
# use_ab_test(signup_form: "single_page")
#
# Force alternatives for multiple experiments:
# use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
#
def use_ab_test(alternatives_by_experiment)
allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
block.call(variant) unless block.nil?
variant
end
end
end
# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
config.include SplitHelper
end
```
Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:
```ruby
it "registers using experimental signup" do
use_ab_test experiment_name: "alternative_name"
post "/signups"
...
end
```
### Starting experiments manually
By default new A/B tests will be active right after deployment. In case you would like to start a new test a while after
the deploy, you can do so by setting the `start_manually` configuration option to `true`.
After choosing this option tests won't be started right after deploy, but only after pressing the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started again by pressing the `Start` button when it is re-initialized.
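For example, in an initializer:

```ruby
Split.configure do |config|
  config.start_manually = true
end
```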
### Reset after completion
When a user completes a test their session is reset so that they may start the test again in the future.
To stop this behaviour you can pass the following option to the `ab_finished` method:
```ruby
ab_finished(:experiment_name, reset: false)
```
The user will then always see the alternative they started with.
Any old unfinished experiment key will be deleted from the user's data storage if the experiment has been removed, or is over and a winner has been chosen. This allows a user to enroll in any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.
### Reset experiments manually
By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.
You may want to do this when you want to change something, like the variants' names or the metadata about an experiment, without resetting everything.
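For example:

```ruby
Split.configure do |config|
  config.reset_manually = true
end
```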
### Multiple experiments at once
By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.
To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = true
end
```
This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.
To address this, set the `allow_multiple_experiments` config option to 'control' like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = 'control'
end
```
For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control', the user may not participate in any more experiments; calling `ab_test(<other experiments>)` will always return the first alternative without adding the user to that experiment.
### Experiment Persistence
Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.
By default Split will store the tests for each user in the session.
You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.
#### Cookies
```ruby
Split.configure do |config|
config.persistence = :cookie
end
```
When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).
```ruby
Split.configure do |config|
config.persistence = :cookie
config.persistence_cookie_length = 2592000 # 30 days
end
```
The data stored consists of the experiment name and the variants the user is in. Example: `{ "experiment_name" => "variant_a" }`
__Note:__ Using cookies depends on `ActionDispatch::Cookies` or an identical API.
#### Redis
Using Redis will allow ab_users to persist across sessions or machines.
```ruby
Split.configure do |config|
config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
# Equivalent
# config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```
Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets the TTL for the user key (if a user is in multiple experiments, the most recent update resets the TTL for all their assignments)
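For example, combining these options (values are illustrative):

```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(
    lookup_by: :current_user_id,
    namespace: "persistence",
    expire_seconds: 2592000) # 30 days
end
```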
#### Dual Adapter
The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.
```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
lookup_by: -> (context) { context.send(:current_user).try(:id) },
expire_seconds: 2592000)
Split.configure do |config|
config.persistence = Split::Persistence::DualAdapter.with_config(
logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
logged_in_adapter: redis_adapter,
logged_out_adapter: cookie_adapter)
config.persistence_cookie_length = 2592000 # 30 days
end
```
#### Custom Adapter
Your custom adapter needs to implement the same API as existing adapters.
See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.
```ruby
Split.configure do |config|
config.persistence = YourCustomAdapterClass
end
```
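As a rough sketch of the expected shape — this assumes the key/value interface the built-in adapters share (`[]`, `[]=`, `delete`, `keys`, constructed with the helper context); check the adapters named above for the authoritative API:

```ruby
class YourCustomAdapterClass
  def initialize(context)
    # `context` is the object that mixes in Split::Helper (e.g. a controller)
    @store = {}
  end

  def [](key)
    @store[key]
  end

  def []=(key, value)
    @store[key] = value
  end

  def delete(key)
    @store.delete(key)
  end

  def keys
    @store.keys
  end
end
```

(An in-memory Hash is used here purely for illustration; a real adapter would read and write a durable store.)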
### Trial Event Hooks
You can define methods that will be called at the same time as experiment
alternative participation and goal completion.
For example:
``` ruby
Split.configure do |config|
config.on_trial = :log_trial # run on every trial
config.on_trial_choose = :log_trial_choose # run on trials with new users only
config.on_trial_complete = :log_trial_complete
end
```
Set these attributes to a method name available in the same context as the
`ab_test` method. These methods should accept one argument, a `Trial` instance.
``` ruby
def log_trial(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_choose(trial)
logger.info "[new user] experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_complete(trial)
logger.info "experiment=%s alternative=%s user=%s complete=true" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
#### Views
If you are running `ab_test` from a view, you must define your event
hook callback as a
[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)
in the controller:
``` ruby
helper_method :log_trial_choose
def log_trial_choose(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
### Experiment Hooks
You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.
For example:
``` ruby
Split.configure do |config|
  # after experiment reset or delete
  config.on_experiment_reset  = -> (experiment) { } # Do something on reset
  config.on_experiment_delete = -> (experiment) { } # Do something else on delete

  # before experiment reset or delete
  config.on_before_experiment_reset  = -> (experiment) { } # Do something on reset
  config.on_before_experiment_delete = -> (experiment) { } # Do something else on delete

  # after the experiment winner has been set
  config.on_experiment_winner_choose = -> (experiment) { } # Do something on winner choose
end
```
## Web Interface
Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.
If you are running Rails 2, you can mount this inside your app using `Rack::URLMap` in your `config.ru`:
```ruby
require 'split/dashboard'
run Rack::URLMap.new \
"/" => Your::App.new,
"/split" => Split::Dashboard.new
```
If you are using Rails 3 or higher, you can mount this inside your app routes by first adding this to the Gemfile:
```ruby
gem 'split', require: 'split/dashboard'
```
Then add this to `config/routes.rb`:
```ruby
mount Split::Dashboard, at: 'split'
```
You may want to password protect that page; you can do so with `Rack::Auth::Basic` (in your split initializer file):
```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:
```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
request.env['warden'].authenticated? # are we authenticated?
request.env['warden'].authenticate! # authenticate if not already
# or even check any other condition such as request.env['warden'].user.is_admin?
end
```
More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/).
### Screenshot

## Configuration
You can override the default configuration options of Split like so:
```ruby
Split.configure do |config|
config.db_failover = true # handle Redis errors gracefully
config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
config.allow_multiple_experiments = true
config.enabled = true
config.persistence = Split::Persistence::SessionAdapter
#config.start_manually = false ## new test will have to be started manually from the admin panel. default false
#config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
config.include_rails_helper = true
config.redis = "redis://custom.redis.url:6380"
end
```
Split looks for the Redis host in the environment variable `REDIS_URL`, then
defaults to `redis://localhost:6379` if it is not specified in the configure block.
On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to
determine which env variable key to use when retrieving the host config. This
defaults to `REDIS_URL`.
### Filtering
In most scenarios you don't want to have A/B testing enabled for web spiders, robots, or special groups of users.
Split provides functionality to filter these out based on a predefined, extensible list of bots, IP lists, or custom exclude logic.
```ruby
Split.configure do |config|
# bot config
config.robot_regex = /my_custom_robot_regex/ # or
config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"
# IP config
config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/
# or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
### Experiment configuration
Instead of providing the experiment options inline, you can store them
in a hash. This hash can control your experiment's alternatives, weights,
algorithm, and whether the experiment resets once finished:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
resettable: false
},
:my_second_experiment => {
algorithm: 'Split::Algorithms::Whiplash',
alternatives: [
{ name: "a", percent: 67 },
{ name: "b", percent: 33 }
]
}
}
end
```
You can also store your experiments in a YAML file:
```ruby
Split.configure do |config|
config.experiments = YAML.load_file "config/experiments.yml"
end
```
You can then define the YAML file like:
```yaml
my_first_experiment:
  alternatives:
    - a
    - b
  resettable: false
my_second_experiment:
  alternatives:
    - name: a
      percent: 67
    - name: b
      percent: 33
```

This simplifies the calls from your code:
```ruby
ab_test(:my_first_experiment)
```
and:
```ruby
ab_finished(:my_first_experiment)
```
You can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metadata: {
"a" => {"text" => "Have a fantastic day"},
"b" => {"text" => "Don't get hit by a bus"}
}
}
}
end
```
or in YAML:

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
  metadata:
    a:
      text: "Have a fantastic day"
    b:
      text: "Don't get hit by a bus"
```
This allows for some advanced experiment configuration using methods like:
```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```
or in views:
```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
<%= alternative %>
<small><%= meta['text'] %></small>
<% end %>
```
The keys used in metadata should be Strings.
#### Metrics
You might wish to track generic metrics, such as conversions, and use
those to complete multiple different experiments without adding more to
your code. You can use the configuration hash to do this, thanks to
the `:metric` option.
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metric: :my_metric
}
}
end
```
Your code may then track a completion using the metric instead of
the experiment name:
```ruby
ab_finished(:my_metric)
```
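For example, two experiments sharing the same metric are both completed by a single call (a sketch based on the `:metric` option above; names are illustrative):

```ruby
Split.configure do |config|
  config.experiments = {
    button_color: { alternatives: ["red", "blue"],  metric: :checkout },
    button_text:  { alternatives: ["Buy", "Order"], metric: :checkout }
  }
end

# Completes both experiments for this user:
ab_finished(:checkout)
```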
You can also create a new metric by instantiating and saving a new Metric object.
```ruby
metric = Split::Metric.new(name: :my_metric)
metric.save
```
#### Goals
You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:
```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```
or you can define them in a configuration file:
```ruby
Split.configure do |config|
config.experiments = {
link_color: {
alternatives: ["red", "blue"],
goals: ["purchase", "refund"]
}
}
end
```
To complete a goal conversion, you do it like this:
```ruby
ab_finished(link_color: "purchase")
```
Note that if you pass additional options, that should be a separate hash:
```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```
**NOTE:** This does not mean that a single experiment can complete more than one goal.
Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)
**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").
**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.
**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.
#### Combined Experiments
If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.
Configure like so:
```ruby
Split.configuration.experiments = {
:button_color_experiment => {
:alternatives => ["blue", "green"],
:combined_experiments => ["button_color_on_signup", "button_color_on_login"]
}
}
```
Starting the combined test starts all combined experiments:
```ruby
ab_combined_test(:button_color_experiment)
```
Finish each combined test as normal:
```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```
**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`
* In Sinatra include the CombinedExperimentsHelper
```ruby
helpers Split::CombinedExperimentsHelper
```
### DB failover solution
Because Redis has no automatic failover mechanism, you can switch on the `db_failover` config option so that `ab_test`
and `ab_finished` will not crash in case of a DB failure. `ab_test` always
delivers alternative A (the first one) in that case.
It's also possible to set a `db_failover_on_db_error` callback (proc),
for example to log these errors via `Rails.logger`.
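For example (mirroring the options shown earlier in the Configuration section):

```ruby
Split.configure do |config|
  config.db_failover = true # handle Redis errors gracefully
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
end
```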
### Redis
You may want to change the Redis host and port Split connects to, or
set various other options at startup.
Split has a `redis` setter which can be given a string or a Redis
object. This means if you're already using Redis in your app, Split
can re-use the existing connection.
String: `Split.redis = 'redis://localhost:6379'`
Redis: `Split.redis = $redis`
For our rails app we have a `config/initializers/split.rb` file where
we load `config/split.yml` by hand and set the Redis information
appropriately.
Here's our `config/split.yml`:
```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```
And our initializer:
```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```
### Redis Caching (v4.0+)
In some high-volume usage scenarios, Redis load can be incurred by repeated
fetches for fairly static data. Enabling caching will reduce this load.
```ruby
Split.configuration.cache = true
```
This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`
## Namespaces
If you're running multiple, separate instances of Split you may want
to namespace the keyspaces so they do not overlap. This is not unlike
the approach taken by many memcached clients.
This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)
library. To configure Split to use `Redis::Namespace`, do the following:
1. Add `redis-namespace` to your Gemfile:
```ruby
gem 'redis-namespace'
```
2. Configure `Split.redis` to use a `Redis::Namespace` instance (for example, in an initializer):
```ruby
redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
```
## Outside of a Web Session
Split provides the Helper module to facilitate running experiments inside web sessions.
Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to
conduct experiments that are not tied to a web session.
```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(:experiment => experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
trial.complete!
end
```
## Algorithms
By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test.
It is possible to specify static weights to favor certain alternatives.
`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).
This algorithm will automatically weight the alternatives based on their relative performance,
choosing the better-performing ones more often as trials are completed.
`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal
participation across all alternatives. This algorithm will choose the alternative
with the fewest participants. In the event of multiple minimum participant alternatives
(i.e. starting a new "Block") the algorithm will choose a random alternative from
those minimum participant alternatives.
Users may also write their own algorithms (a rough sketch follows the example below). The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.
To change the algorithm globally for all experiments, use the following in your initializer:
```ruby
Split.configure do |config|
config.algorithm = Split::Algorithms::Whiplash
end
```
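And a rough sketch of a custom algorithm. This assumes the class-level interface the built-in algorithms appear to share — a `choose_alternative` method that receives the experiment and returns one of its alternatives — so check a built-in such as `Split::Algorithms::WeightedSample` for the authoritative contract:

```ruby
module Split
  module Algorithms
    # Naive illustrative algorithm: always picks the first alternative.
    class FirstAlternative
      def self.choose_alternative(experiment)
        experiment.alternatives.first
      end
    end
  end
end
```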
## Extensions
- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.
## Screencast
Ryan Bates has produced an excellent 10 minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)
## Blogposts
* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)
## Backers
Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]
<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a>
## Sponsors
Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]
<a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a>
## Contribute
Please do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.
### Development
The source code is hosted at [GitHub](https://github.com/splitrb/split).
Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).
You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).
### Tests
Run the tests like this:
```
# Start a Redis server in another tab.
redis-server

bundle
rake spec
```
### A Note on Patches and Pull Requests
* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a
future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history.
(If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.
### Code of Conduct
Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
## Copyright
[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
<MSG> update README with new syntax
<DFF> @@ -549,11 +549,8 @@ production: redis1.example.com:6379
And our initializer:
```ruby
-rails_root = ENV['RAILS_ROOT'] || File.dirname(__FILE__) + '/../..'
-rails_env = ENV['RAILS_ENV'] || 'development'
-
-split_config = YAML.load_file(rails_root + '/config/split.yml')
-Split.redis = split_config[rails_env]
+split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
+Split.redis = split_config[Rails.env]
```
## Namespaces
| 2 | update README with new syntax | 5 | .md | md | mit | splitrb/split |
10071080 | <NME> README.md
<BEF> # [Split](https://libraries.io/rubygems/split)
[](http://badge.fury.io/rb/split)

[](https://codeclimate.com/github/splitrb/split)
[](https://codeclimate.com/github/splitrb/split/coverage)
[](https://github.com/RichardLitt/standard-readme)
[](https://www.codetriage.com/splitrb/split)
> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split
Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.
Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.
Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.
## Install
### Requirements
Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.
If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)
Split uses Redis as a datastore.
Split only supports Redis 4.0 or greater.
If you're on OS X, Homebrew is the simplest way to install Redis:
```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```
You now have a Redis daemon running on port `6379`.
### Setup
```bash
gem install split
```
#### Rails
Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.
#### Sinatra
To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:
```ruby
require 'split'
class MySinatraApp < Sinatra::Base
enable :sessions
helpers Split::Helper
get '/' do
...
end
```
## Usage
To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.
`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.
It can be used to render different templates, show different text or any other case based logic.
`ab_finished` is used to make a completion of an experiment, or conversion.
Example: View
```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
<%= image_tag(button_file, alt: "Login!") %>
<% end %>
```
Example: Controller
```ruby
def register_new_user
# See what level of free points maximizes users' decision to buy replacement points.
@starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```
Example: Conversion tracking (in a controller!)
```ruby
def buy_new_points
# some business logic
ab_finished(:new_user_free_points)
end
```
Example: Conversion tracking (in a view)
```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```
You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
## Statistical Validity
Split has two options for you to use to determine which alternative is the best.
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.
As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).
[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.
The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.
Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).
```ruby
Split.configure do |config|
config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```
## Extras
### Weighted alternatives
Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.
To do this you can pass a weight with each alternative in the following ways:
```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
ab_test(:homepage_design, {'Old' => 9}, 'New')
```
This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.
### Overriding alternatives
For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the url.
If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:
http://myawesomesite.com?ab_test[button_color]=red
will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.
In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.
http://myawesomesite.com?SPLIT_DISABLE=true
It is not required to send `SPLIT_DISABLE=false` to activate Split.
### Rspec Helper
To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:
```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
# Force a specific experiment alternative to always be returned:
# use_ab_test(signup_form: "single_page")
#
# Force alternatives for multiple experiments:
# use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
#
def use_ab_test(alternatives_by_experiment)
allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
block.call(variant) unless block.nil?
variant
end
end
end
# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
config.include SplitHelper
end
```
Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:
```ruby
it "registers using experimental signup" do
use_ab_test experiment_name: "alternative_name"
post "/signups"
...
end
```
### Starting experiments manually
By default new A/B tests will be active right after deployment. In case you would like to start new test a while after
the deploy, you can do it by setting the `start_manually` configuration option to `true`.
After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.
### Reset after completion
When a user completes a test their session is reset so that they may start the test again in the future.
To stop this behaviour you can pass the following option to the `ab_finished` method:
```ruby
ab_finished(:experiment_name, reset: false)
```
The user will then always see the alternative they started with.
Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.
### Reset experiments manually
By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.
You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.
### Multiple experiments at once
By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.
To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = true
end
```
This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.
To address this, setting the `allow_multiple_experiments` config option to 'control' like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = 'control'
end
```
For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment.
### Experiment Persistence
Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.
By default Split will store the tests for each user in the session.
You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.
#### Cookies
```ruby
Split.configure do |config|
config.persistence = :cookie
end
```
When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).
```ruby
Split.configure do |config|
config.persistence = :cookie
config.persistence_cookie_length = 2592000 # 30 days
end
```
The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" }
__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API
#### Redis
Using Redis will allow ab_users to persist across sessions or machines.
```ruby
Split.configure do |config|
config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
# Equivalent
# config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```
Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)
#### Dual Adapter
The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.
```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
lookup_by: -> (context) { context.send(:current_user).try(:id) },
expire_seconds: 2592000)
Split.configure do |config|
config.persistence = Split::Persistence::DualAdapter.with_config(
logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
logged_in_adapter: redis_adapter,
logged_out_adapter: cookie_adapter)
config.persistence_cookie_length = 2592000 # 30 days
end
```
#### Custom Adapter
Your custom adapter needs to implement the same API as existing adapters.
See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.
```ruby
Split.configure do |config|
config.persistence = YourCustomAdapterClass
end
```
### Trial Event Hooks
You can define methods that will be called at the same time as experiment
alternative participation and goal completion.
For example:
``` ruby
Split.configure do |config|
config.on_trial = :log_trial # run on every trial
config.on_trial_choose = :log_trial_choose # run on trials with new users only
config.on_trial_complete = :log_trial_complete
end
```
Set these attributes to a method name available in the same context as the
`ab_test` method. These methods should accept one argument, a `Trial` instance.
``` ruby
def log_trial(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_choose(trial)
logger.info "[new user] experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_complete(trial)
logger.info "experiment=%s alternative=%s user=%s complete=true" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
#### Views
If you are running `ab_test` from a view, you must define your event
hook callback as a
[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)
in the controller:
``` ruby
helper_method :log_trial_choose
def log_trial_choose(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
### Experiment Hooks
You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.
For example:
``` ruby
Split.configure do |config|
# after experiment reset or deleted
config.on_experiment_reset = -> (example) { # Do something on reset }
config.on_experiment_delete = -> (experiment) { # Do something else on delete }
# before experiment reset or deleted
config.on_before_experiment_reset = -> (example) { # Do something on reset }
config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }
# after experiment winner had been set
config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }
end
```
## Web Interface
Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.
If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`
```ruby
require 'split/dashboard'
run Rack::URLMap.new \
"/" => Your::App.new,
"/split" => Split::Dashboard.new
```
However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:
```ruby
gem 'split', require: 'split/dashboard'
```
Then adding this to config/routes.rb
```ruby
mount Split::Dashboard, at: 'split'
```
You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)
```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:
```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
request.env['warden'].authenticated? # are we authenticated?
request.env['warden'].authenticate! # authenticate if not already
# or even check any other condition such as request.env['warden'].user.is_admin?
end
```
More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)
### Screenshot

## Configuration
You can override the default configuration options of Split like so:
```ruby
Split.configure do |config|
config.db_failover = true # handle Redis errors gracefully
config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
config.allow_multiple_experiments = true
config.enabled = true
config.persistence = Split::Persistence::SessionAdapter
#config.start_manually = false ## new test will have to be started manually from the admin panel. default false
#config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
config.include_rails_helper = true
config.redis = "redis://custom.redis.url:6380"
end
```
Split looks for the Redis host in the environment variable `REDIS_URL` then
defaults to `redis://localhost:6379` if not specified by configure block.
On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to
determine which env variable key to use when retrieving the host config. This
defaults to `REDIS_URL`.
### Filtering
In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.
Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.
```ruby
Split.configure do |config|
# bot config
config.robot_regex = /my_custom_robot_regex/ # or
config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"
# IP config
config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/
# or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
### Experiment configuration
Instead of providing the experiment options inline, you can store them
in a hash. This hash can control your experiment's alternatives, weights,
algorithm and if the experiment resets once finished:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
resettable: false
},
:my_second_experiment => {
algorithm: 'Split::Algorithms::Whiplash',
alternatives: [
{ name: "a", percent: 67 },
{ name: "b", percent: 33 }
]
}
}
end
```
You can also store your experiments in a YAML file:
```ruby
Split.configure do |config|
config.experiments = YAML.load_file "config/experiments.yml"
end
```
You can then define the YAML file like:
```yaml
my_first_experiment:
alternatives:
- a
- b
my_second_experiment:
alternatives:
- name: a
And our initializer:
```ruby
rails_root = ENV['RAILS_ROOT'] || File.dirname(__FILE__) + '/../..'
rails_env = ENV['RAILS_ENV'] || 'development'
split_config = YAML.load_file(rails_root + '/config/split.yml')
Split.redis = split_config[rails_env]
```
## Namespaces
```ruby
ab_test(:my_first_experiment)
```
and:
```ruby
ab_finished(:my_first_experiment)
```
You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metadata: {
"a" => {"text" => "Have a fantastic day"},
"b" => {"text" => "Don't get hit by a bus"}
}
}
}
end
```
```yaml
my_first_experiment:
alternatives:
- a
- b
metadata:
a:
text: "Have a fantastic day"
b:
text: "Don't get hit by a bus"
```
This allows for some advanced experiment configuration using methods like:
```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```
or in views:
```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
<%= alternative %>
<small><%= meta['text'] %></small>
<% end %>
```
The keys used in metadata should be Strings.
#### Metrics
You might wish to track generic metrics, such as conversions, and use
those to complete multiple different experiments without adding more to
your code. You can use the configuration hash to do this, thanks to
the `:metric` option.
```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metric: :my_metric
    }
  }
end
```
Your code may then track a completion using the metric instead of
the experiment name:
```ruby
ab_finished(:my_metric)
```
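For example, two experiments can share one metric, so a single `ab_finished(:my_metric)` call completes whichever of them the user is enrolled in (the experiment names below are hypothetical):

```ruby
Split.configure do |config|
  config.experiments = {
    homepage_hero: { alternatives: ["a", "b"], metric: :my_metric }, # hypothetical name
    pricing_copy:  { alternatives: ["x", "y"], metric: :my_metric }  # hypothetical name
  }
end
```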
You can also create a new metric by instantiating and saving a new Metric object.
```ruby
metric = Split::Metric.new(:my_metric)
metric.save
```
#### Goals
You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:
```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```
or you can define them in a configuration file:
```ruby
Split.configure do |config|
  config.experiments = {
    link_color: {
      alternatives: ["red", "blue"],
      goals: ["purchase", "refund"]
    }
  }
end
```
To complete a goal conversion, call `ab_finished` with the goal:
```ruby
ab_finished(link_color: "purchase")
```
Note that if you pass additional options, they should be in a separate hash:
```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```
**NOTE:** This does not mean that a single experiment can complete more than one goal.
Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)
**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").
**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.
**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.
#### Combined Experiments
If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.
Configure like so:
```ruby
Split.configuration.experiments = {
  button_color_experiment: {
    alternatives: ["blue", "green"],
    combined_experiments: ["button_color_on_signup", "button_color_on_login"]
  }
}
```
Starting the combined test starts all combined experiments:
```ruby
ab_combined_test(:button_color_experiment)
```
Finish each combined test as normal:
```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```
**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`
* In Sinatra, include the `CombinedExperimentsHelper`:
```ruby
helpers Split::CombinedExperimentsHelper
```
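Putting it together, a minimal Sinatra wiring might look like this (a sketch based on the Sinatra setup shown earlier in this README; `MySinatraApp` is just an illustrative name):

```ruby
require 'sinatra/base'
require 'split'

class MySinatraApp < Sinatra::Base
  enable :sessions                          # Split persists assignments in the session by default
  helpers Split::Helper                     # ab_test / ab_finished
  helpers Split::CombinedExperimentsHelper  # ab_combined_test
end
```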
### DB failover solution
Because Redis has no automatic failover mechanism, you can switch on the
`db_failover` config option so that `ab_test` and `ab_finished` do not crash
if the database fails. In that case, `ab_test` always delivers alternative A
(the first one).

It's also possible to set a `db_failover_on_db_error` callback (a proc), for
example to log these errors via `Rails.logger`.
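Both options were shown in the configuration example above; together they look like this:

```ruby
Split.configure do |config|
  config.db_failover = true # serve alternative A instead of raising on Redis errors
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
end
```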
### Redis
You may want to change the Redis host and port Split connects to, or
set various other options at startup.
Split has a `redis` setter which can be given a string or a Redis
object. This means if you're already using Redis in your app, Split
can re-use the existing connection.
String: `Split.redis = 'redis://localhost:6379'`
Redis: `Split.redis = $redis`
For our Rails app we have a `config/initializers/split.rb` file where
we load `config/split.yml` by hand and set the Redis information
appropriately.
Here's our `config/split.yml`:
```yaml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```
And our initializer:
```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```
### Redis Caching (v4.0+)
In some high-volume scenarios, repeated fetches of fairly static data can put
unnecessary load on Redis. Enabling caching will reduce this load.
```ruby
Split.configuration.cache = true
```
This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`
## Namespaces
If you're running multiple, separate instances of Split, you may want
to namespace the keyspaces so they do not overlap. This is not unlike
the approach taken by many memcached clients.
This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)
library. To configure Split to use `Redis::Namespace`, do the following:
1. Add `redis-namespace` to your Gemfile:
```ruby
gem 'redis-namespace'
```
2. Configure `Split.redis` to use a `Redis::Namespace` instance (for example, in an initializer):
```ruby
redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
```
## Outside of a Web Session
Split provides the Helper module to facilitate running experiments inside web sessions.
Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to
conduct experiments that are not tied to a web session.
```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(experiment: experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
  trial.complete!
end
```
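You can also inspect raw counts for an alternative outside the dashboard (a short sketch using the `Split::Alternative` API):

```ruby
alternative = Split::Alternative.new('red', 'color')
alternative.participant_count # => trials assigned to "red"
alternative.completed_count   # => completions recorded for "red"
```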
## Algorithms
By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test.
It is possible to specify static weights to favor certain alternatives.
`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).
This algorithm will automatically weight the alternatives based on their relative performance,
choosing the better-performing ones more often as trials are completed.
`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal
participation across all alternatives. This algorithm will choose the alternative
with the fewest participants. If several alternatives are tied for the minimum
number of participants (i.e. a new "block" is starting), the algorithm chooses
randomly among those alternatives.
Users may also write their own algorithms. The algorithm may be specified globally in the configuration file, or per experiment via the experiments hash.
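As an illustration, here is a minimal sketch of a custom algorithm. It assumes the same interface as the built-in algorithms (a class-level `choose_alternative` method that receives an experiment and returns one of its alternatives); the experiment name below is hypothetical:

```ruby
module Split
  module Algorithms
    module AlwaysFirst
      # Trivial strategy for illustration: always return the first alternative.
      # A real algorithm would weigh participant and completion counts here.
      def self.choose_alternative(experiment)
        experiment.alternatives.first
      end
    end
  end
end

Split.configure do |config|
  config.experiments = {
    onboarding_flow: { # hypothetical experiment name
      alternatives: ["wizard", "single_page"],
      algorithm: 'Split::Algorithms::AlwaysFirst'
    }
  }
end
```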
To change the algorithm globally for all experiments, use the following in your initializer:
```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```
## Extensions
- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in Mongoid (still uses Redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.
## Screencast
Ryan Bates has produced an excellent 10-minute screencast about Split on the RailsCasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)
## Blogposts
* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)
## Backers
Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]
<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a>
## Sponsors
Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]
<a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a>
## Contribute
Please do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.
### Development
The source code is hosted at [GitHub](https://github.com/splitrb/split).
Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).
You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).
### Tests
Run the tests like this:
    # Start a Redis server in another tab.
    redis-server

    bundle
    rake spec
### A Note on Patches and Pull Requests
* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a
future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history.
(If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.
### Code of Conduct
Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
## Copyright
[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
<MSG> update README with new syntax
<DFF> @@ -549,11 +549,8 @@ production: redis1.example.com:6379
And our initializer:
```ruby
-rails_root = ENV['RAILS_ROOT'] || File.dirname(__FILE__) + '/../..'
-rails_env = ENV['RAILS_ENV'] || 'development'
-
-split_config = YAML.load_file(rails_root + '/config/split.yml')
-Split.redis = split_config[rails_env]
+split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
+Split.redis = split_config[Rails.env]
```
## Namespaces
| 2 | update README with new syntax | 5 | .md | md | mit | splitrb/split |
10071081 | <NME> README.md
<BEF> # [Split](https://libraries.io/rubygems/split)
[](http://badge.fury.io/rb/split)

[](https://codeclimate.com/github/splitrb/split)
[](https://codeclimate.com/github/splitrb/split/coverage)
[](https://github.com/RichardLitt/standard-readme)
[](https://www.codetriage.com/splitrb/split)
> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split
Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.
Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.
Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.
## Install
### Requirements
Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.
If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)
Split uses Redis as a datastore.
Split only supports Redis 4.0 or greater.
If you're on OS X, Homebrew is the simplest way to install Redis:
```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```
You now have a Redis daemon running on port `6379`.
### Setup
```bash
gem install split
```
#### Rails
Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.
#### Sinatra
To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:
```ruby
require 'split'
class MySinatraApp < Sinatra::Base
enable :sessions
helpers Split::Helper
get '/' do
...
end
```
## Usage
To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.
`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.
It can be used to render different templates, show different text or any other case based logic.
`ab_finished` is used to make a completion of an experiment, or conversion.
Example: View
```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
<%= image_tag(button_file, alt: "Login!") %>
<% end %>
```
Example: Controller
```ruby
def register_new_user
# See what level of free points maximizes users' decision to buy replacement points.
@starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```
Example: Conversion tracking (in a controller!)
```ruby
def buy_new_points
# some business logic
ab_finished(:new_user_free_points)
end
```
Example: Conversion tracking (in a view)
```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```
You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
## Statistical Validity
Split has two options for you to use to determine which alternative is the best.
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.
As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).
[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.
The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.
Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).
```ruby
Split.configure do |config|
config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```
## Extras
### Weighted alternatives
Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.
To do this you can pass a weight with each alternative in the following ways:
```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
ab_test(:homepage_design, {'Old' => 9}, 'New')
```
This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.
### Overriding alternatives
For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the url.
If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:
http://myawesomesite.com?ab_test[button_color]=red
will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.
In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.
http://myawesomesite.com?SPLIT_DISABLE=true
It is not required to send `SPLIT_DISABLE=false` to activate Split.
### Rspec Helper
To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:
```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
# Force a specific experiment alternative to always be returned:
# use_ab_test(signup_form: "single_page")
#
# Force alternatives for multiple experiments:
# use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
#
def use_ab_test(alternatives_by_experiment)
allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
block.call(variant) unless block.nil?
variant
end
end
end
# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
config.include SplitHelper
end
```
Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:
```ruby
it "registers using experimental signup" do
use_ab_test experiment_name: "alternative_name"
post "/signups"
...
end
```
### Starting experiments manually
By default new A/B tests will be active right after deployment. In case you would like to start new test a while after
the deploy, you can do it by setting the `start_manually` configuration option to `true`.
After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.
### Reset after completion
When a user completes a test their session is reset so that they may start the test again in the future.
To stop this behaviour you can pass the following option to the `ab_finished` method:
```ruby
ab_finished(:experiment_name, reset: false)
```
The user will then always see the alternative they started with.
Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.
### Reset experiments manually
By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.
You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.
### Multiple experiments at once
By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.
To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = true
end
```
This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.
To address this, setting the `allow_multiple_experiments` config option to 'control' like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = 'control'
end
```
For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment.
### Experiment Persistence
Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.
By default Split will store the tests for each user in the session.
You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.
#### Cookies
```ruby
Split.configure do |config|
config.persistence = :cookie
end
```
When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).
```ruby
Split.configure do |config|
config.persistence = :cookie
config.persistence_cookie_length = 2592000 # 30 days
end
```
The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" }
__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API
#### Redis
Using Redis will allow ab_users to persist across sessions or machines.
```ruby
Split.configure do |config|
config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
# Equivalent
# config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```
Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)
#### Dual Adapter
The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.
```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
lookup_by: -> (context) { context.send(:current_user).try(:id) },
expire_seconds: 2592000)
Split.configure do |config|
config.persistence = Split::Persistence::DualAdapter.with_config(
logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
logged_in_adapter: redis_adapter,
logged_out_adapter: cookie_adapter)
config.persistence_cookie_length = 2592000 # 30 days
end
```
#### Custom Adapter
Your custom adapter needs to implement the same API as existing adapters.
See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.
```ruby
Split.configure do |config|
config.persistence = YourCustomAdapterClass
end
```
### Trial Event Hooks
You can define methods that will be called at the same time as experiment
alternative participation and goal completion.
For example:
``` ruby
Split.configure do |config|
config.on_trial = :log_trial # run on every trial
config.on_trial_choose = :log_trial_choose # run on trials with new users only
config.on_trial_complete = :log_trial_complete
end
```
Set these attributes to a method name available in the same context as the
`ab_test` method. These methods should accept one argument, a `Trial` instance.
``` ruby
def log_trial(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_choose(trial)
logger.info "[new user] experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_complete(trial)
logger.info "experiment=%s alternative=%s user=%s complete=true" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
#### Views
If you are running `ab_test` from a view, you must define your event
hook callback as a
[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)
in the controller:
``` ruby
helper_method :log_trial_choose
def log_trial_choose(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
### Experiment Hooks
You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.
For example:
``` ruby
Split.configure do |config|
# after experiment reset or deleted
config.on_experiment_reset = -> (example) { # Do something on reset }
config.on_experiment_delete = -> (experiment) { # Do something else on delete }
# before experiment reset or deleted
config.on_before_experiment_reset = -> (example) { # Do something on reset }
config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }
# after experiment winner had been set
config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }
end
```
## Web Interface
Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.
If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`
```ruby
require 'split/dashboard'
run Rack::URLMap.new \
"/" => Your::App.new,
"/split" => Split::Dashboard.new
```
However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:
```ruby
gem 'split', require: 'split/dashboard'
```
Then adding this to config/routes.rb
```ruby
mount Split::Dashboard, at: 'split'
```
You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)
```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:
```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
request.env['warden'].authenticated? # are we authenticated?
request.env['warden'].authenticate! # authenticate if not already
# or even check any other condition such as request.env['warden'].user.is_admin?
end
```
More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)
### Screenshot

## Configuration
You can override the default configuration options of Split like so:
```ruby
Split.configure do |config|
config.db_failover = true # handle Redis errors gracefully
config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
config.allow_multiple_experiments = true
config.enabled = true
config.persistence = Split::Persistence::SessionAdapter
#config.start_manually = false ## new test will have to be started manually from the admin panel. default false
#config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
config.include_rails_helper = true
config.redis = "redis://custom.redis.url:6380"
end
```
Split looks for the Redis host in the environment variable `REDIS_URL` then
defaults to `redis://localhost:6379` if not specified by configure block.
On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to
determine which env variable key to use when retrieving the host config. This
defaults to `REDIS_URL`.
### Filtering
In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.
Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.
```ruby
Split.configure do |config|
# bot config
config.robot_regex = /my_custom_robot_regex/ # or
config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"
# IP config
config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/
# or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
### Experiment configuration
Instead of providing the experiment options inline, you can store them
in a hash. This hash can control your experiment's alternatives, weights,
algorithm and if the experiment resets once finished:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
resettable: false
},
:my_second_experiment => {
algorithm: 'Split::Algorithms::Whiplash',
alternatives: [
{ name: "a", percent: 67 },
{ name: "b", percent: 33 }
]
}
}
end
```
You can also store your experiments in a YAML file:
```ruby
Split.configure do |config|
config.experiments = YAML.load_file "config/experiments.yml"
end
```
You can then define the YAML file like:
```yaml
my_first_experiment:
alternatives:
- a
- b
my_second_experiment:
alternatives:
- name: a
And our initializer:
```ruby
rails_root = ENV['RAILS_ROOT'] || File.dirname(__FILE__) + '/../..'
rails_env = ENV['RAILS_ENV'] || 'development'
split_config = YAML.load_file(rails_root + '/config/split.yml')
Split.redis = split_config[rails_env]
```
## Namespaces
```ruby
ab_test(:my_first_experiment)
```
and:
```ruby
ab_finished(:my_first_experiment)
```
You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metadata: {
"a" => {"text" => "Have a fantastic day"},
"b" => {"text" => "Don't get hit by a bus"}
}
}
}
end
```
```yaml
my_first_experiment:
alternatives:
- a
- b
metadata:
a:
text: "Have a fantastic day"
b:
text: "Don't get hit by a bus"
```
This allows for some advanced experiment configuration using methods like:
```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```
or in views:
```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
<%= alternative %>
<small><%= meta['text'] %></small>
<% end %>
```
The keys used in meta data should be Strings
#### Metrics
You might wish to track generic metrics, such as conversions, and use
those to complete multiple different experiments without adding more to
your code. You can use the configuration hash to do this, thanks to
the `:metric` option.
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metric: :my_metric
}
}
end
```
Your code may then track a completion using the metric instead of
the experiment name:
```ruby
ab_finished(:my_metric)
```
You can also create a new metric by instantiating and saving a new Metric object.
```ruby
Split::Metric.new(:my_metric)
Split::Metric.save
```
#### Goals
You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:
```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```
or you can define them in a configuration file:
```ruby
Split.configure do |config|
config.experiments = {
link_color: {
alternatives: ["red", "blue"],
goals: ["purchase", "refund"]
}
}
end
```
To complete a goal conversion, you do it like:
```ruby
ab_finished(link_color: "purchase")
```
Note that if you pass additional options, that should be a separate hash:
```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```
**NOTE:** This does not mean that a single experiment can complete more than one goal.
Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)
**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").
**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.
**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.
#### Combined Experiments
If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.
Configure like so:
```ruby
Split.configuration.experiments = {
:button_color_experiment => {
:alternatives => ["blue", "green"],
:combined_experiments => ["button_color_on_signup", "button_color_on_login"]
}
}
```
Starting the combined test starts all combined experiments
```ruby
ab_combined_test(:button_color_experiment)
```
Finish each combined test as normal
```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```
**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`
* In Sinatra include the CombinedExperimentsHelper
```
helpers Split::CombinedExperimentsHelper
```
### DB failover solution
Due to the fact that Redis has no automatic failover mechanism, it's
possible to switch on the `db_failover` config option, so that `ab_test`
and `ab_finished` will not crash in case of a db failure. `ab_test` always
delivers alternative A (the first one) in that case.
It's also possible to set a `db_failover_on_db_error` callback (proc)
for example to log these errors via Rails.logger.
### Redis
You may want to change the Redis host and port Split connects to, or
set various other options at startup.
Split has a `redis` setter which can be given a string or a Redis
object. This means if you're already using Redis in your app, Split
can re-use the existing connection.
String: `Split.redis = 'redis://localhost:6379'`
Redis: `Split.redis = $redis`
For our rails app we have a `config/initializers/split.rb` file where
we load `config/split.yml` by hand and set the Redis information
appropriately.
Here's our `config/split.yml`:
```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```
And our initializer:
```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```
### Redis Caching (v4.0+)
In some high-volume usage scenarios, Redis load can be incurred by repeated
fetches for fairly static data. Enabling caching will reduce this load.
```ruby
Split.configuration.cache = true
````
This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`
## Namespaces
If you're running multiple, separate instances of Split you may want
to namespace the keyspaces so they do not overlap. This is not unlike
the approach taken by many memcached clients.
This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)
library. To configure Split to use `Redis::Namespace`, do the following:
1. Add `redis-namespace` to your Gemfile:
```ruby
gem 'redis-namespace'
```
2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an
initializer):
```ruby
redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
```
## Outside of a Web Session
Split provides the Helper module to facilitate running experiments inside web sessions.
Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to
conduct experiments that are not tied to a web session.
```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(:experiment => experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
trial.complete!
end
```
## Algorithms
By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.
It is possible to specify static weights to favor certain alternatives.
`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).
This algorithm will automatically weight the alternatives based on their relative performance,
choosing the better-performing ones more often as trials are completed.
`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal
participation across all alternatives. This algorithm will choose the alternative
with the fewest participants. In the event of multiple minimum participant alternatives
(i.e. starting a new "Block") the algorithm will choose a random alternative from
those minimum participant alternatives.
Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.
To change the algorithm globally for all experiments, use the following in your initializer:
```ruby
Split.configure do |config|
config.algorithm = Split::Algorithms::Whiplash
end
```
## Extensions
- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.
## Screencast
Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)
## Blogposts
* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)
## Backers
Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]
<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a>
## Sponsors
Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]
<a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a>
## Contribute
Please do! Over 70 people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.
### Development
The source code is hosted at [GitHub](https://github.com/splitrb/split).
Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).
You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).
### Tests
Run the tests like this:
```sh
# Start a Redis server in another tab.
redis-server

bundle
rake spec
```
### A Note on Patches and Pull Requests
* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a
future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history.
(If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.
### Code of Conduct
Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
## Copyright
[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
<MSG> update README with new syntax
<DFF> @@ -549,11 +549,8 @@ production: redis1.example.com:6379
And our initializer:
```ruby
-rails_root = ENV['RAILS_ROOT'] || File.dirname(__FILE__) + '/../..'
-rails_env = ENV['RAILS_ENV'] || 'development'
-
-split_config = YAML.load_file(rails_root + '/config/split.yml')
-Split.redis = split_config[rails_env]
+split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
+Split.redis = split_config[Rails.env]
```
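For reference, a minimal sketch of the config file this initializer reads — the YAML shape and the `fetch` guard are illustrative assumptions, not part of the diff:

```ruby
# config/initializers/split.rb — illustrative sketch only.
# Assumes config/split.yml maps each Rails environment to a Redis URL, e.g.:
#
#   development: redis://localhost:6379
#   production: redis://redis1.example.com:6379
require "yaml"

split_config = YAML.load_file(Rails.root.join("config", "split.yml"))
Split.redis = split_config.fetch(Rails.env) # fail fast if the env key is missing
```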
## Namespaces
| 2 | update README with new syntax | 5 | .md | md | mit | splitrb/split |
10071082 | <NME> helper_spec.rb
<BEF> # frozen_string_literal: true
require "spec_helper"
# TODO change some of these tests to use Rack::Test
describe Split::Helper do
include Split::Helper
let(:experiment) {
Split::ExperimentCatalog.find_or_create("link_color", "blue", "red")
}
describe "ab_test" do
it "should not raise an error when passed strings for alternatives" do
expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error
end
it "should not raise an error when passed an array for alternatives" do
expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error
end
it "should raise the appropriate error when passed integers for alternatives" do
expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError)
end
it "should raise the appropriate error when passed symbols for alternatives" do
expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError)
end
it "should not raise error when passed an array for goals" do
expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error
end
it "should not raise error when passed just one goal" do
expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error
end
it "raises an appropriate error when processing combined expirements" do
Split.configuration.experiments = {
combined_exp_1: {
alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ],
metric: :my_metric,
combined_experiments: [:combined_exp_1_sub_1]
}
}
Split::ExperimentCatalog.find_or_create("combined_exp_1")
expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do
ab_test("link_color", "blue", "red")
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should increment the participation counter after assignment to a new user" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)
end
it "should not increment the counter for an experiment that the user is not participating in" do
ab_test("link_color", "blue", "red")
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
# User shouldn't participate in this second experiment
ab_test("button_size", "small", "big")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an not started experiment" do
expect(Split.configuration).to receive(:start_manually).and_return(true)
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should return the given alternative for an existing user" do
expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red")
end
it "should always return the winner if one is present" do
experiment.winner = "orange"
expect(ab_test("link_color", "blue", "red")).to eq("orange")
end
it "should allow the alternative to be forced by passing it in the params" do
# ?ab_test[link_color]=blue
@params = { "ab_test" => { "link_color" => "blue" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
@params = { "ab_test" => { "link_color" => "red" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1)
expect(alternative).to eq("red")
end
it "should not allow an arbitrary alternative" do
@params = { "ab_test" => { "link_color" => "pink" } }
alternative = ab_test("link_color", "blue")
expect(alternative).to eq("blue")
end
it "should not store the split when a param forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do
@params = { "SPLIT_DISABLE" => "true" }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
alternative = ab_test("link_color", "red", "blue")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1)
expect(alternative).to eq("red")
end
it "should not store the split when Split generically disabled" do
@params = { "SPLIT_DISABLE" => "true" }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
context "when store_override is set" do
before { Split.configuration.store_override = true }
it "should store the forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).to receive(:[]=).with("link_color", "blue")
ab_test("link_color", "blue", "red")
end
end
context "when on_trial_choose is set" do
before { Split.configuration.on_trial_choose = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_test("link_color", "blue", "red")
end
end
it "should allow passing a block" do
alt = ab_test("link_color", "blue", "red")
ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" }
expect(ret).to eq("shared/#{alt}")
end
it "should allow the share of visitors see an alternative to be specified" do
ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should allow alternative weighting interface as a single hash" do
ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"])
expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])
end
it "should only let a user participate in one experiment at a time" do
link_color = ab_test("link_color", "blue", "red")
ab_test("button_size", "small", "big")
expect(ab_user["link_color"]).to eq(link_color)
big = Split::Alternative.new("big", "button_size")
expect(big.participant_count).to eq(0)
small = Split::Alternative.new("small", "button_size")
expect(small.participant_count).to eq(0)
end
it "should let a user participate in many experiment with allow_multiple_experiments option" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
link_color = ab_test("link_color", "blue", "red")
button_size = ab_test("button_size", "small", "big")
expect(ab_user["link_color"]).to eq(link_color)
expect(ab_user["button_size"]).to eq(button_size)
button_size_alt = Split::Alternative.new(button_size, "button_size")
expect(button_size_alt.participant_count).to eq(1)
end
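    # allow_multiple_experiments also accepts the string "control" (next context):
    # users may join many experiments but carry at most one non-control assignment.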
context "with allow_multiple_experiments = 'control'" do
it "should let a user participate in many experiment with one non-'control' alternative" do
Split.configure do |config|
config.allow_multiple_experiments = "control"
end
groups = 100.times.map do |n|
ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n })
end
experiments = ab_user.active_experiments
expect(experiments.size).to be > 1
count_control = experiments.values.count { |g| g == "control" }
expect(count_control).to eq(experiments.size - 1)
count_alts = groups.count { |g| g != "control" }
expect(count_alts).to eq(1)
end
context "when user already has experiment" do
let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) }
before do
Split.configure do |config|
config.allow_multiple_experiments = "control"
end
Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save
Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save
end
it "should restore previously selected alternative" do
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "should select the correct alternatives after experiment resets" do
experiment = Split::ExperimentCatalog.find(:test_0)
experiment.reset
mock_user[experiment.key] = "test-alt"
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "lets override existing choice" do
pending "this requires user store reset on first call not depending on whelther it is current trial"
@params = { "ab_test" => { "test_1" => "test-alt" } }
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control"
expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
end
end
end
it "should not over-write a finished key when an experiment is on a later version" do
experiment.increment_version
ab_user = { experiment.key => "blue", experiment.finished_key => true }
finished_session = ab_user.dup
ab_test("link_color", "blue", "red")
expect(ab_user).to eq(finished_session)
end
end
describe "metadata" do
context "is defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: { "one" => "Meta1", "two" => "Meta2" }
}
}
end
it "should be passed to helper block" do
@params = { "ab_test" => { "my_experiment" => "two" } }
expect(ab_test("my_experiment")).to eq "two"
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq("Meta2")
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment")).to eq "one"
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq("Meta1")
end
end
context "is not defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: nil
}
}
end
it "should be passed to helper block" do
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq({})
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq({})
end
end
end
describe "ab_finished" do
context "for an experiment that the user participates in" do
before(:each) do
@experiment_name = "link_color"
@alternatives = ["blue", "red"]
@experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
@alternative_name = ab_test(@experiment_name, *@alternatives)
@previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
end
it "should increment the counter for the completed alternative" do
ab_finished(@experiment_name)
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should set experiment's finished key if reset is false" do
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should not increment the counter if reset is false and the experiment has been already finished" do
2.times { ab_finished(@experiment_name, { reset: false }) }
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new(a, "button_size").completed_count }
end
it "should clear out the user's participation from their session" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should not clear out the users session if reset is false" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should reset the users session when experiment is not versioned" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should reset the users session when experiment is versioned" do
@experiment.increment_version
@alternative_name = ab_test(@experiment_name, *@alternatives)
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
context "when on_trial_complete is set" do
before { Split.configuration.on_trial_complete = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_finished(@experiment_name)
end
it "should not call the method without alternative" do
ab_user[@experiment.key] = nil
expect(self).not_to receive(:some_method)
ab_finished(@experiment_name)
end
end
end
context "for an experiment that the user is excluded from" do
before do
alternative = ab_test("link_color", "blue", "red")
expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1)
alternative = ab_test("button_size", "small", "big")
expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0)
end
it "should not increment the completed counter" do
# So, user should be participating in the link_color experiment and
# receive the control for button_size. As the user is not participating in
# the button size experiment, finishing it should not increase the
# completion count for that alternative.
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new("small", "button_size").completed_count }
end
end
context "for an experiment that the user does not participate in" do
before do
Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt")
end
it "should not raise an exception" do
expect { ab_finished(:not_started_experiment) }.not_to raise_exception
end
it "should not change the user state when reset is false" do
expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])
end
it "should not change the user state when reset is true" do
expect(self).not_to receive(:reset!)
ab_finished(:not_started_experiment)
end
it "should not increment the completed counter" do
ab_finished(:not_started_experiment)
expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0)
expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0)
end
end
end
context "finished with config" do
it "passes reset option" do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
}
}
alternative = ab_test(:my_experiment)
experiment = Split::ExperimentCatalog.find :my_experiment
ab_finished :my_experiment
expect(ab_user[experiment.key]).to eq(alternative)
expect(ab_user[experiment.finished_key]).to eq(true)
end
end
context "finished with metric name" do
before { Split.configuration.experiments = {} }
before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }
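    # Spec-local helper: registers the experiment, pre-assigns the user to its
    # first alternative, and stubs Split::Alternative so we can assert whether
    # ab_finished(metric) increments that alternative's completion count.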
def should_finish_experiment(experiment_name, should_finish = true)
alts = Split.configuration.experiments[experiment_name][:alternatives]
experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)
alt_name = ab_user[experiment.key] = alts.first
alt = double("alternative")
expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)
expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)
if should_finish
expect(alt).to receive(:increment_completion).at_most(1).times
else
expect(alt).not_to receive(:increment_completion)
end
end
it "completes the test" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
metric: :my_metric
}
should_finish_experiment :my_experiment
ab_finished :my_metric
end
it "completes all relevant tests" do
Split.configuration.experiments = {
exp_1: {
alternatives: [ "1-1", "1-2" ],
metric: :my_metric
},
exp_2: {
alternatives: [ "2-1", "2-2" ],
metric: :another_metric
},
exp_3: {
alternatives: [ "3-1", "3-2" ],
metric: :my_metric
},
}
should_finish_experiment :exp_1
should_finish_experiment :exp_2, false
should_finish_experiment :exp_3
ab_finished :my_metric
end
it "passes reset option" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
resettable: false,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
it "passes through options" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric, reset: false
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
end
describe "conversions" do
it "should return a conversion rate for an alternative" do
alternative_name = ab_test("link_color", "blue", "red")
      previous_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(previous_conversion_rate).to eq(0.0)
      ab_finished("link_color")
      new_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(new_conversion_rate).to eq(1.0)
end
end
describe "active experiments" do
it "should show an active test" do
alternative = ab_test("def", "4", "5", "6")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show a finished test" do
alternative = ab_test("def", "4", "5", "6")
ab_finished("def", { reset: false })
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show an active test when an experiment is on a later version" do
experiment.reset
expect(experiment.version).to eq(1)
ab_test("link_color", "blue", "red")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "link_color"
end
it "should show versioned tests properly" do
10.times { experiment.reset }
alternative = ab_test(experiment.name, "blue", "red")
ab_finished(experiment.name, reset: false)
expect(experiment.version).to eq(10)
expect(active_experiments.count).to eq 1
expect(active_experiments).to eq({ "link_color" => alternative })
end
it "should show multiple tests" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
alternative = ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 2
expect(active_experiments["def"]).to eq alternative
expect(active_experiments["ghi"]).to eq another_alternative
end
it "should not show tests with winners" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6")
e.winner = "4"
ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "ghi"
expect(active_experiments.first[1]).to eq another_alternative
end
end
describe "when user is a robot" do
before(:each) do
@request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)")
end
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not create a experiment" do
ab_test("link_color", "blue", "red")
expect(Split::Experiment.new("link_color")).to be_a_new_record
names_and_weights.should == [['control_opt', 0.18], ['second_opt', 0.18], ['third_opt', 0.64]]
names_and_weights.inject(0){|sum, nw| sum + nw[1]}.should == 1.0
end
end
it 'should handle multiple experiments correctly' do
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when providing custom ignore logic" do
context "using a proc to configure custom logic" do
before(:each) do
Split.configure do |c|
c.ignore_filter = proc { |request| true } # ignore everything
end
end
it "ignores the ab_test" do
ab_test("link_color", "blue", "red")
red_count = Split::Alternative.new("red", "link_color").participant_count
blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((red_count + blue_count)).to be(0)
end
end
end
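  # Shared behaviour for any condition that disables the test for a request
  # (ignored IPs, preview mode): the control is returned and neither the
  # participation nor the completion counters move.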
shared_examples_for "a disabled test" do
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when ip address is ignored" do
context "individually" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.130")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it_behaves_like "a disabled test"
end
context "for a range" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.129")
Split.configure do |c|
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "using both a range and a specific value" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.128")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "when ignored other address" do
before do
@request = OpenStruct.new(ip: "1.1.1.1")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it "works as usual" do
alternative_name = ab_test("link_color", "red", "blue")
expect {
ab_finished("link_color")
}.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1)
end
end
end
describe "when user is previewing" do
before(:each) do
@request = OpenStruct.new(headers: { "x-purpose" => "preview" })
end
it_behaves_like "a disabled test"
end
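  # Versioning: experiment.reset bumps the version and session keys take the form
  # "name:version", so assignments from older versions are discarded and re-bucketed.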
describe "versioned experiments" do
it "should use version zero if no version is present" do
alternative_name = ab_test("link_color", "blue", "red")
expect(experiment.version).to eq(0)
expect(ab_user["link_color"]).to eq(alternative_name)
end
it "should save the version of the experiment to the session" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
end
it "should load the experiment even if the version is not 0" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
return_alternative_name = ab_test("link_color", "blue", "red")
expect(return_alternative_name).to eq(alternative_name)
end
it "should reset the session of a user on an older version of the experiment" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
expect(new_alternative.participant_count).to eq(1)
end
it "should cleanup old versions of experiments from the session" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
end
it "should only count completion of users on the current version" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
Split::Alternative.new(alternative_name, "link_color")
experiment.reset
expect(experiment.version).to eq(1)
ab_finished("link_color")
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.completed_count).to eq(0)
end
end
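  # Redis outage behaviour: with db_failover disabled the error propagates; with
  # it enabled, ab_test/ab_finished rescue the error (reporting it via
  # db_failover_on_db_error when configured) and fall back to the first alternative.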
context "when redis is not available" do
before(:each) do
expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)
end
context "and db_failover config option is turned off" do
before(:each) do
Split.configure do |config|
config.db_failover = false
end
end
describe "ab_test" do
it "should raise an exception" do
expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "finished" do
it "should raise an exception" do
expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "disable split testing" do
before(:each) do
Split.configure do |config|
config.enabled = false
end
end
it "should not attempt to connect to redis" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should return control variable" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect { ab_finished("link_color") }.not_to raise_error
end
end
end
context "and db_failover config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover = true
end
end
describe "ab_test" do
it "should not raise an exception" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_test("link_color", "blue", "red")
end
it "should always use first alternative" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/blue")
end
context "and db_failover_allow_parameter_override config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover_allow_parameter_override = true
end
end
context "and given an override parameter" do
it "should use given override instead of the first alternative" do
@params = { "ab_test" => { "link_color" => "red" } }
expect(ab_test("link_color", "blue", "red")).to eq("red")
expect(ab_test("link_color", "blue", "red", "green")).to eq("red")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/red")
end
end
end
context "and preloaded config given" do
before do
Split.configuration.experiments[:link_color] = {
alternatives: [ "blue", "red" ],
}
end
it "uses first alternative" do
expect(ab_test(:link_color)).to eq("blue")
end
end
end
describe "finished" do
it "should not raise an exception" do
expect { ab_finished("link_color") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_finished("link_color")
end
end
end
end
context "with preloaded config" do
before { Split.configuration.experiments = {} }
it "pulls options from config file" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
ab_test :my_experiment
expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
end
it "can be called multiple times" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
5.times { ab_test :my_experiment }
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(experiment.goals).to eq([ "goal1", "goal2" ])
expect(experiment.participant_count).to eq(1)
end
it "accepts multiple goals" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: [ "goal1", "goal2", "goal3" ]
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
end
it "allow specifying goals to be optional" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ]
}
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([])
end
it "accepts multiple alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "second_opt", "third_opt" ],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
end
it "accepts probability on alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 67 },
{ name: "second_opt", percent: 10 },
{ name: "third_opt", percent: 23 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
end
it "accepts probability on some alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 34 },
"second_opt",
{ name: "third_opt", percent: 23 },
"fourth_opt",
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
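    # Alternatives without an explicit percent share the remainder evenly:
    # 1.0 - (0.34 + 0.23) = 0.43 here, split as 0.215 each.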
it "allows name param without probability" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt" },
"second_opt",
{ name: "third_opt", percent: 64 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "fails gracefully if config is missing experiment" do
Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
end
it "fails gracefully if config is missing" do
expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "fails gracefully if config is missing alternatives" do
Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
expect { ab_test :my_experiment }.to raise_error(NoMethodError)
end
end
it "should handle multiple experiments correctly" do
experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
ab_test("link_color", "blue", "red")
ab_test("link_color2", "blue", "red")
ab_finished("link_color2")
experiment2.alternatives.each do |alt|
expect(alt.unfinished_count).to eq(0)
end
end
context "with goals" do
before do
@experiment = { "link_color" => ["purchase", "refund"] }
@alternatives = ["blue", "red"]
@experiment_name, @goals = normalize_metric(@experiment)
@goal1 = @goals[0]
@goal2 = @goals[1]
end
it "should normalize experiment" do
expect(@experiment_name).to eq("link_color")
expect(@goals).to eq(["purchase", "refund"])
end
describe "ab_test" do
it "should allow experiment goals interface as a single hash" do
ab_test(@experiment, *@alternatives)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.goals).to eq(["purchase", "refund"])
end
end
describe "ab_finished" do
before do
@alternative_name = ab_test(@experiment, *@alternatives)
end
it "should increment the counter for the specified-goal completed alternative" do
expect { ab_finished({ "link_color" => ["purchase"] }) }
.to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
.and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
end
end
end
end
<MSG> Fail gracefully when config file is missing or bad
This is better than raising obscure exceptions from the internals.
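A quick sketch of the resulting behaviour — the messages shown are the regexes asserted by the specs in the diff below, not exact wording:

```ruby
# Misconfigured experiments now fail loudly at the call site instead of deep
# inside the helper:
Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
ab_test :my_experiment   # raises an error matching /not found/i

Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
ab_test :my_experiment   # raises an error matching /variants/i
```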
<DFF> @@ -649,6 +649,21 @@ describe Split::Helper do
names_and_weights.should == [['control_opt', 0.18], ['second_opt', 0.18], ['third_opt', 0.64]]
names_and_weights.inject(0){|sum, nw| sum + nw[1]}.should == 1.0
end
+
+ it "fails gracefully if config is missing experiment" do
+ Split.configuration.experiments = { :other_experiment => { :foo => "Bar" } }
+ lambda { ab_test :my_experiment }.should raise_error(/not found/i)
+ end
+
+ it "fails gracefully if config is missing" do
+ Split.configuration.experiments = nil
+ lambda { ab_test :my_experiment }.should raise_error(/not found/i)
+ end
+
+ it "fails gracefully if config is missing variants" do
+ Split.configuration.experiments[:my_experiment] = { :foo => "Bar" }
+ lambda { ab_test :my_experiment }.should raise_error(/variants/i)
+ end
end
it 'should handle multiple experiments correctly' do
| 15 | Fail gracefully when config file is missing or bad | 0 | .rb | rb | mit | splitrb/split |
10071083 | <NME> helper_spec.rb
<BEF> # frozen_string_literal: true
require "spec_helper"
# TODO change some of these tests to use Rack::Test
describe Split::Helper do
include Split::Helper
let(:experiment) {
Split::ExperimentCatalog.find_or_create("link_color", "blue", "red")
}
describe "ab_test" do
it "should not raise an error when passed strings for alternatives" do
expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error
end
it "should not raise an error when passed an array for alternatives" do
expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error
end
it "should raise the appropriate error when passed integers for alternatives" do
expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError)
end
it "should raise the appropriate error when passed symbols for alternatives" do
expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError)
end
it "should not raise error when passed an array for goals" do
expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error
end
it "should not raise error when passed just one goal" do
expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error
end
it "raises an appropriate error when processing combined expirements" do
Split.configuration.experiments = {
combined_exp_1: {
alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ],
metric: :my_metric,
combined_experiments: [:combined_exp_1_sub_1]
}
}
Split::ExperimentCatalog.find_or_create("combined_exp_1")
expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do
ab_test("link_color", "blue", "red")
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should increment the participation counter after assignment to a new user" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)
end
it "should not increment the counter for an experiment that the user is not participating in" do
ab_test("link_color", "blue", "red")
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
# User shouldn't participate in this second experiment
ab_test("button_size", "small", "big")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an not started experiment" do
expect(Split.configuration).to receive(:start_manually).and_return(true)
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should return the given alternative for an existing user" do
expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red")
end
it "should always return the winner if one is present" do
experiment.winner = "orange"
expect(ab_test("link_color", "blue", "red")).to eq("orange")
end
it "should allow the alternative to be forced by passing it in the params" do
# ?ab_test[link_color]=blue
@params = { "ab_test" => { "link_color" => "blue" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
@params = { "ab_test" => { "link_color" => "red" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1)
expect(alternative).to eq("red")
end
it "should not allow an arbitrary alternative" do
@params = { "ab_test" => { "link_color" => "pink" } }
alternative = ab_test("link_color", "blue")
expect(alternative).to eq("blue")
end
it "should not store the split when a param forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do
@params = { "SPLIT_DISABLE" => "true" }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
alternative = ab_test("link_color", "red", "blue")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1)
expect(alternative).to eq("red")
end
it "should not store the split when Split generically disabled" do
@params = { "SPLIT_DISABLE" => "true" }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
context "when store_override is set" do
before { Split.configuration.store_override = true }
it "should store the forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).to receive(:[]=).with("link_color", "blue")
ab_test("link_color", "blue", "red")
end
end
context "when on_trial_choose is set" do
before { Split.configuration.on_trial_choose = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_test("link_color", "blue", "red")
end
end
it "should allow passing a block" do
alt = ab_test("link_color", "blue", "red")
ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" }
expect(ret).to eq("shared/#{alt}")
end
it "should allow the share of visitors see an alternative to be specified" do
ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should allow alternative weighting interface as a single hash" do
ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"])
expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])
end
it "should only let a user participate in one experiment at a time" do
link_color = ab_test("link_color", "blue", "red")
ab_test("button_size", "small", "big")
expect(ab_user["link_color"]).to eq(link_color)
big = Split::Alternative.new("big", "button_size")
expect(big.participant_count).to eq(0)
small = Split::Alternative.new("small", "button_size")
expect(small.participant_count).to eq(0)
end
it "should let a user participate in many experiment with allow_multiple_experiments option" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
link_color = ab_test("link_color", "blue", "red")
button_size = ab_test("button_size", "small", "big")
expect(ab_user["link_color"]).to eq(link_color)
expect(ab_user["button_size"]).to eq(button_size)
button_size_alt = Split::Alternative.new(button_size, "button_size")
expect(button_size_alt.participant_count).to eq(1)
end
context "with allow_multiple_experiments = 'control'" do
it "should let a user participate in many experiment with one non-'control' alternative" do
Split.configure do |config|
config.allow_multiple_experiments = "control"
end
groups = 100.times.map do |n|
ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n })
end
experiments = ab_user.active_experiments
expect(experiments.size).to be > 1
count_control = experiments.values.count { |g| g == "control" }
expect(count_control).to eq(experiments.size - 1)
count_alts = groups.count { |g| g != "control" }
expect(count_alts).to eq(1)
end
context "when user already has experiment" do
let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) }
before do
Split.configure do |config|
config.allow_multiple_experiments = "control"
end
Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save
Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save
end
it "should restore previously selected alternative" do
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "should select the correct alternatives after experiment resets" do
experiment = Split::ExperimentCatalog.find(:test_0)
experiment.reset
mock_user[experiment.key] = "test-alt"
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "lets override existing choice" do
pending "this requires user store reset on first call not depending on whelther it is current trial"
@params = { "ab_test" => { "test_1" => "test-alt" } }
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control"
expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
end
end
end
it "should not over-write a finished key when an experiment is on a later version" do
experiment.increment_version
ab_user = { experiment.key => "blue", experiment.finished_key => true }
finished_session = ab_user.dup
ab_test("link_color", "blue", "red")
expect(ab_user).to eq(finished_session)
end
end
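  # Experiment metadata: the per-alternative hash from the config is yielded as
  # the block's second argument; with Split disabled, the control's metadata (or
  # an empty hash when none is configured) is yielded instead.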
describe "metadata" do
context "is defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: { "one" => "Meta1", "two" => "Meta2" }
}
}
end
it "should be passed to helper block" do
@params = { "ab_test" => { "my_experiment" => "two" } }
expect(ab_test("my_experiment")).to eq "two"
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq("Meta2")
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment")).to eq "one"
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq("Meta1")
end
end
context "is not defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: nil
}
}
end
it "should be passed to helper block" do
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq({})
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq({})
end
end
end
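  # ab_finished records a conversion for the user's current alternative and,
  # unless reset: false is passed, clears the assignment; otherwise a
  # finished_key marks the experiment as completed for that user.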
describe "ab_finished" do
context "for an experiment that the user participates in" do
before(:each) do
@experiment_name = "link_color"
@alternatives = ["blue", "red"]
@experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
@alternative_name = ab_test(@experiment_name, *@alternatives)
@previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
end
it "should increment the counter for the completed alternative" do
ab_finished(@experiment_name)
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should set experiment's finished key if reset is false" do
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should not increment the counter if reset is false and the experiment has been already finished" do
2.times { ab_finished(@experiment_name, { reset: false }) }
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new(a, "button_size").completed_count }
end
it "should clear out the user's participation from their session" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should not clear out the users session if reset is false" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should reset the users session when experiment is not versioned" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should reset the users session when experiment is versioned" do
@experiment.increment_version
@alternative_name = ab_test(@experiment_name, *@alternatives)
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
context "when on_trial_complete is set" do
before { Split.configuration.on_trial_complete = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_finished(@experiment_name)
end
it "should not call the method without alternative" do
ab_user[@experiment.key] = nil
expect(self).not_to receive(:some_method)
ab_finished(@experiment_name)
end
end
end
context "for an experiment that the user is excluded from" do
before do
alternative = ab_test("link_color", "blue", "red")
expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1)
alternative = ab_test("button_size", "small", "big")
expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0)
end
it "should not increment the completed counter" do
# So, user should be participating in the link_color experiment and
# receive the control for button_size. As the user is not participating in
# the button size experiment, finishing it should not increase the
# completion count for that alternative.
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new("small", "button_size").completed_count }
end
end
context "for an experiment that the user does not participate in" do
before do
Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt")
end
it "should not raise an exception" do
expect { ab_finished(:not_started_experiment) }.not_to raise_exception
end
it "should not change the user state when reset is false" do
expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])
end
it "should not change the user state when reset is true" do
expect(self).not_to receive(:reset!)
ab_finished(:not_started_experiment)
end
it "should not increment the completed counter" do
ab_finished(:not_started_experiment)
expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0)
expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0)
end
end
end
context "finished with config" do
it "passes reset option" do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
}
}
alternative = ab_test(:my_experiment)
experiment = Split::ExperimentCatalog.find :my_experiment
ab_finished :my_experiment
expect(ab_user[experiment.key]).to eq(alternative)
expect(ab_user[experiment.finished_key]).to eq(true)
end
end
context "finished with metric name" do
before { Split.configuration.experiments = {} }
before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }
def should_finish_experiment(experiment_name, should_finish = true)
alts = Split.configuration.experiments[experiment_name][:alternatives]
experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)
alt_name = ab_user[experiment.key] = alts.first
alt = double("alternative")
expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)
expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)
if should_finish
expect(alt).to receive(:increment_completion).at_most(1).times
else
expect(alt).not_to receive(:increment_completion)
end
end
it "completes the test" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
metric: :my_metric
}
should_finish_experiment :my_experiment
ab_finished :my_metric
end
it "completes all relevant tests" do
Split.configuration.experiments = {
exp_1: {
alternatives: [ "1-1", "1-2" ],
metric: :my_metric
},
exp_2: {
alternatives: [ "2-1", "2-2" ],
metric: :another_metric
},
exp_3: {
alternatives: [ "3-1", "3-2" ],
metric: :my_metric
},
}
should_finish_experiment :exp_1
should_finish_experiment :exp_2, false
should_finish_experiment :exp_3
ab_finished :my_metric
end
it "passes reset option" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
resettable: false,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
it "passes through options" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric, reset: false
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
end
describe "conversions" do
it "should return a conversion rate for an alternative" do
alternative_name = ab_test("link_color", "blue", "red")
      previous_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(previous_conversion_rate).to eq(0.0)
      ab_finished("link_color")
      new_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(new_conversion_rate).to eq(1.0)
end
end
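  # active_experiments maps each experiment name to the alternative the current
  # user is bucketed into.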
describe "active experiments" do
it "should show an active test" do
alternative = ab_test("def", "4", "5", "6")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show a finished test" do
alternative = ab_test("def", "4", "5", "6")
ab_finished("def", { reset: false })
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show an active test when an experiment is on a later version" do
experiment.reset
expect(experiment.version).to eq(1)
ab_test("link_color", "blue", "red")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "link_color"
end
it "should show versioned tests properly" do
10.times { experiment.reset }
alternative = ab_test(experiment.name, "blue", "red")
ab_finished(experiment.name, reset: false)
expect(experiment.version).to eq(10)
expect(active_experiments.count).to eq 1
expect(active_experiments).to eq({ "link_color" => alternative })
end
it "should show multiple tests" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
alternative = ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 2
expect(active_experiments["def"]).to eq alternative
expect(active_experiments["ghi"]).to eq another_alternative
end
it "should not show tests with winners" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6")
e.winner = "4"
ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "ghi"
expect(active_experiments.first[1]).to eq another_alternative
end
end
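  # Bot traffic, detected via the user agent, always receives the control and
  # must leave no trace in participation or completion counts.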
describe "when user is a robot" do
before(:each) do
@request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)")
end
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not create a experiment" do
ab_test("link_color", "blue", "red")
expect(Split::Experiment.new("link_color")).to be_a_new_record
names_and_weights.should == [['control_opt', 0.18], ['second_opt', 0.18], ['third_opt', 0.64]]
names_and_weights.inject(0){|sum, nw| sum + nw[1]}.should == 1.0
end
end
it 'should handle multiple experiments correctly' do
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when providing custom ignore logic" do
context "using a proc to configure custom logic" do
before(:each) do
Split.configure do |c|
c.ignore_filter = proc { |request| true } # ignore everything
end
end
it "ignores the ab_test" do
ab_test("link_color", "blue", "red")
red_count = Split::Alternative.new("red", "link_color").participant_count
blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((red_count + blue_count)).to be(0)
end
end
end
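  # Shared examples for any context in which Split should be bypassed entirely:
  # the control is served and no counters move. Reused by the ignored-IP and
  # preview contexts below.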
shared_examples_for "a disabled test" do
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when ip address is ignored" do
context "individually" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.130")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it_behaves_like "a disabled test"
end
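    # ignore_ip_addresses accepts regexps as well as strings, so whole address
    # ranges can be excluded.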
context "for a range" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.129")
Split.configure do |c|
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "using both a range and a specific value" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.128")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "when ignored other address" do
before do
@request = OpenStruct.new(ip: "1.1.1.1")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it "works as usual" do
alternative_name = ab_test("link_color", "red", "blue")
expect {
ab_finished("link_color")
}.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1)
end
end
end
describe "when user is previewing" do
before(:each) do
@request = OpenStruct.new(headers: { "x-purpose" => "preview" })
end
it_behaves_like "a disabled test"
end
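  # experiment.reset bumps the version; from version 1 onward the user's session
  # key carries a suffix, e.g. "link_color:1".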
describe "versioned experiments" do
it "should use version zero if no version is present" do
alternative_name = ab_test("link_color", "blue", "red")
expect(experiment.version).to eq(0)
expect(ab_user["link_color"]).to eq(alternative_name)
end
it "should save the version of the experiment to the session" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
end
it "should load the experiment even if the version is not 0" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
return_alternative_name = ab_test("link_color", "blue", "red")
expect(return_alternative_name).to eq(alternative_name)
end
it "should reset the session of a user on an older version of the experiment" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
expect(new_alternative.participant_count).to eq(1)
end
it "should cleanup old versions of experiments from the session" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
end
it "should only count completion of users on the current version" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
Split::Alternative.new(alternative_name, "link_color")
experiment.reset
expect(experiment.version).to eq(1)
ab_finished("link_color")
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.completed_count).to eq(0)
end
end
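  # When Redis is unreachable, behaviour hinges on db_failover: switched off,
  # connection errors propagate; switched on, the control (or a param override)
  # is served instead.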
context "when redis is not available" do
before(:each) do
expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)
end
context "and db_failover config option is turned off" do
before(:each) do
Split.configure do |config|
config.db_failover = false
end
end
describe "ab_test" do
it "should raise an exception" do
expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "finished" do
it "should raise an exception" do
expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "disable split testing" do
before(:each) do
Split.configure do |config|
config.enabled = false
end
end
it "should not attempt to connect to redis" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should return control variable" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect { ab_finished("link_color") }.not_to raise_error
end
end
end
context "and db_failover config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover = true
end
end
describe "ab_test" do
it "should not raise an exception" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_test("link_color", "blue", "red")
end
it "should always use first alternative" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/blue")
end
context "and db_failover_allow_parameter_override config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover_allow_parameter_override = true
end
end
context "and given an override parameter" do
it "should use given override instead of the first alternative" do
@params = { "ab_test" => { "link_color" => "red" } }
expect(ab_test("link_color", "blue", "red")).to eq("red")
expect(ab_test("link_color", "blue", "red", "green")).to eq("red")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/red")
end
end
end
context "and preloaded config given" do
before do
Split.configuration.experiments[:link_color] = {
alternatives: [ "blue", "red" ],
}
end
it "uses first alternative" do
expect(ab_test(:link_color)).to eq("blue")
end
end
end
describe "finished" do
it "should not raise an exception" do
expect { ab_finished("link_color") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_finished("link_color")
end
end
end
end
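  # Experiments can be declared entirely in Split.configuration.experiments;
  # ab_test then needs only the experiment name.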
context "with preloaded config" do
before { Split.configuration.experiments = {} }
it "pulls options from config file" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
ab_test :my_experiment
expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
end
it "can be called multiple times" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
5.times { ab_test :my_experiment }
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(experiment.goals).to eq([ "goal1", "goal2" ])
expect(experiment.participant_count).to eq(1)
end
it "accepts multiple goals" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: [ "goal1", "goal2", "goal3" ]
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
end
it "allow specifying goals to be optional" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ]
}
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([])
end
it "accepts multiple alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "second_opt", "third_opt" ],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
end
it "accepts probability on alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 67 },
{ name: "second_opt", percent: 10 },
{ name: "third_opt", percent: 23 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
end
it "accepts probability on some alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 34 },
"second_opt",
{ name: "third_opt", percent: 23 },
"fourth_opt",
],
}
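      # Unweighted alternatives split the remaining probability evenly:
      # (1 - 0.34 - 0.23) / 2 = 0.215.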
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "allows name param without probability" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt" },
"second_opt",
{ name: "third_opt", percent: 64 },
],
}
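      # The share left after third_opt is split evenly: (1 - 0.64) / 2 = 0.18.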
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "fails gracefully if config is missing experiment" do
Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
end
it "fails gracefully if config is missing" do
expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "fails gracefully if config is missing alternatives" do
Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
expect { ab_test :my_experiment }.to raise_error(NoMethodError)
end
end
it "should handle multiple experiments correctly" do
experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
ab_test("link_color", "blue", "red")
ab_test("link_color2", "blue", "red")
ab_finished("link_color2")
experiment2.alternatives.each do |alt|
expect(alt.unfinished_count).to eq(0)
end
end
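  # Goals let a single experiment track several conversion events;
  # normalize_metric splits { "link_color" => ["purchase", "refund"] } into the
  # experiment name and its goal list.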
context "with goals" do
before do
@experiment = { "link_color" => ["purchase", "refund"] }
@alternatives = ["blue", "red"]
@experiment_name, @goals = normalize_metric(@experiment)
@goal1 = @goals[0]
@goal2 = @goals[1]
end
it "should normalize experiment" do
expect(@experiment_name).to eq("link_color")
expect(@goals).to eq(["purchase", "refund"])
end
describe "ab_test" do
it "should allow experiment goals interface as a single hash" do
ab_test(@experiment, *@alternatives)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.goals).to eq(["purchase", "refund"])
end
end
describe "ab_finished" do
before do
@alternative_name = ab_test(@experiment, *@alternatives)
end
it "should increment the counter for the specified-goal completed alternative" do
expect { ab_finished({ "link_color" => ["purchase"] }) }
.to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
.and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
end
end
end
end
<MSG> Fail gracefully when config file is missing or bad
This is better than raising obscure exceptions from the internals.
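A sketch of the resulting behaviour (illustration only, mirroring the tests
added in the diff below):

    Split.configuration.experiments = nil
    ab_test :my_experiment   # raises an error matching /not found/i

    Split.configuration.experiments = { :my_experiment => { :foo => "Bar" } }
    ab_test :my_experiment   # raises an error matching /variants/i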
<DFF> @@ -649,6 +649,21 @@ describe Split::Helper do
names_and_weights.should == [['control_opt', 0.18], ['second_opt', 0.18], ['third_opt', 0.64]]
names_and_weights.inject(0){|sum, nw| sum + nw[1]}.should == 1.0
end
+
+ it "fails gracefully if config is missing experiment" do
+ Split.configuration.experiments = { :other_experiment => { :foo => "Bar" } }
+ lambda { ab_test :my_experiment }.should raise_error(/not found/i)
+ end
+
+ it "fails gracefully if config is missing" do
+ Split.configuration.experiments = nil
+ lambda { ab_test :my_experiment }.should raise_error(/not found/i)
+ end
+
+ it "fails gracefully if config is missing variants" do
+ Split.configuration.experiments[:my_experiment] = { :foo => "Bar" }
+ lambda { ab_test :my_experiment }.should raise_error(/variants/i)
+ end
end
it 'should handle multiple experiments correctly' do
| 15 | Fail gracefully when config file is missing or bad | 0 | .rb | rb | mit | splitrb/split |
10071084 | <NME> helper_spec.rb
<BEF> # frozen_string_literal: true
require "spec_helper"
# TODO change some of these tests to use Rack::Test
describe Split::Helper do
include Split::Helper
let(:experiment) {
Split::ExperimentCatalog.find_or_create("link_color", "blue", "red")
}
describe "ab_test" do
it "should not raise an error when passed strings for alternatives" do
expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error
end
it "should not raise an error when passed an array for alternatives" do
expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error
end
it "should raise the appropriate error when passed integers for alternatives" do
expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError)
end
it "should raise the appropriate error when passed symbols for alternatives" do
expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError)
end
it "should not raise error when passed an array for goals" do
expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error
end
it "should not raise error when passed just one goal" do
expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error
end
it "raises an appropriate error when processing combined expirements" do
Split.configuration.experiments = {
combined_exp_1: {
alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ],
metric: :my_metric,
combined_experiments: [:combined_exp_1_sub_1]
}
}
Split::ExperimentCatalog.find_or_create("combined_exp_1")
expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do
ab_test("link_color", "blue", "red")
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should increment the participation counter after assignment to a new user" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)
end
it "should not increment the counter for an experiment that the user is not participating in" do
ab_test("link_color", "blue", "red")
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
# User shouldn't participate in this second experiment
ab_test("button_size", "small", "big")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an not started experiment" do
expect(Split.configuration).to receive(:start_manually).and_return(true)
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should return the given alternative for an existing user" do
expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red")
end
it "should always return the winner if one is present" do
experiment.winner = "orange"
expect(ab_test("link_color", "blue", "red")).to eq("orange")
end
it "should allow the alternative to be forced by passing it in the params" do
# ?ab_test[link_color]=blue
@params = { "ab_test" => { "link_color" => "blue" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
@params = { "ab_test" => { "link_color" => "red" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1)
expect(alternative).to eq("red")
end
it "should not allow an arbitrary alternative" do
@params = { "ab_test" => { "link_color" => "pink" } }
alternative = ab_test("link_color", "blue")
expect(alternative).to eq("blue")
end
it "should not store the split when a param forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do
@params = { "SPLIT_DISABLE" => "true" }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
alternative = ab_test("link_color", "red", "blue")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1)
expect(alternative).to eq("red")
end
it "should not store the split when Split generically disabled" do
@params = { "SPLIT_DISABLE" => "true" }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
context "when store_override is set" do
before { Split.configuration.store_override = true }
it "should store the forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).to receive(:[]=).with("link_color", "blue")
ab_test("link_color", "blue", "red")
end
end
context "when on_trial_choose is set" do
before { Split.configuration.on_trial_choose = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_test("link_color", "blue", "red")
end
end
it "should allow passing a block" do
alt = ab_test("link_color", "blue", "red")
ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" }
expect(ret).to eq("shared/#{alt}")
end
it "should allow the share of visitors see an alternative to be specified" do
ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should allow alternative weighting interface as a single hash" do
ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"])
expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])
end
it "should only let a user participate in one experiment at a time" do
link_color = ab_test("link_color", "blue", "red")
ab_test("button_size", "small", "big")
expect(ab_user["link_color"]).to eq(link_color)
big = Split::Alternative.new("big", "button_size")
expect(big.participant_count).to eq(0)
small = Split::Alternative.new("small", "button_size")
expect(small.participant_count).to eq(0)
end
it "should let a user participate in many experiment with allow_multiple_experiments option" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
link_color = ab_test("link_color", "blue", "red")
button_size = ab_test("button_size", "small", "big")
expect(ab_user["link_color"]).to eq(link_color)
expect(ab_user["button_size"]).to eq(button_size)
button_size_alt = Split::Alternative.new(button_size, "button_size")
expect(button_size_alt.participant_count).to eq(1)
end
context "with allow_multiple_experiments = 'control'" do
it "should let a user participate in many experiment with one non-'control' alternative" do
Split.configure do |config|
config.allow_multiple_experiments = "control"
end
groups = 100.times.map do |n|
ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n })
end
experiments = ab_user.active_experiments
expect(experiments.size).to be > 1
count_control = experiments.values.count { |g| g == "control" }
expect(count_control).to eq(experiments.size - 1)
count_alts = groups.count { |g| g != "control" }
expect(count_alts).to eq(1)
end
context "when user already has experiment" do
let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) }
before do
Split.configure do |config|
config.allow_multiple_experiments = "control"
end
Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save
Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save
end
it "should restore previously selected alternative" do
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "should select the correct alternatives after experiment resets" do
experiment = Split::ExperimentCatalog.find(:test_0)
experiment.reset
mock_user[experiment.key] = "test-alt"
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "lets override existing choice" do
pending "this requires user store reset on first call not depending on whelther it is current trial"
@params = { "ab_test" => { "test_1" => "test-alt" } }
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control"
expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
end
end
end
it "should not over-write a finished key when an experiment is on a later version" do
experiment.increment_version
ab_user = { experiment.key => "blue", experiment.finished_key => true }
finished_session = ab_user.dup
ab_test("link_color", "blue", "red")
expect(ab_user).to eq(finished_session)
end
end
describe "metadata" do
context "is defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: { "one" => "Meta1", "two" => "Meta2" }
}
}
end
it "should be passed to helper block" do
@params = { "ab_test" => { "my_experiment" => "two" } }
expect(ab_test("my_experiment")).to eq "two"
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq("Meta2")
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment")).to eq "one"
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq("Meta1")
end
end
context "is not defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: nil
}
}
end
it "should be passed to helper block" do
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq({})
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq({})
end
end
end
describe "ab_finished" do
context "for an experiment that the user participates in" do
before(:each) do
@experiment_name = "link_color"
@alternatives = ["blue", "red"]
@experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
@alternative_name = ab_test(@experiment_name, *@alternatives)
@previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
end
it "should increment the counter for the completed alternative" do
ab_finished(@experiment_name)
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should set experiment's finished key if reset is false" do
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should not increment the counter if reset is false and the experiment has been already finished" do
2.times { ab_finished(@experiment_name, { reset: false }) }
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new(a, "button_size").completed_count }
end
it "should clear out the user's participation from their session" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should not clear out the users session if reset is false" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should reset the users session when experiment is not versioned" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should reset the users session when experiment is versioned" do
@experiment.increment_version
@alternative_name = ab_test(@experiment_name, *@alternatives)
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
context "when on_trial_complete is set" do
before { Split.configuration.on_trial_complete = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_finished(@experiment_name)
end
it "should not call the method without alternative" do
ab_user[@experiment.key] = nil
expect(self).not_to receive(:some_method)
ab_finished(@experiment_name)
end
end
end
context "for an experiment that the user is excluded from" do
before do
alternative = ab_test("link_color", "blue", "red")
expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1)
alternative = ab_test("button_size", "small", "big")
expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0)
end
it "should not increment the completed counter" do
# So, user should be participating in the link_color experiment and
# receive the control for button_size. As the user is not participating in
# the button size experiment, finishing it should not increase the
# completion count for that alternative.
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new("small", "button_size").completed_count }
end
end
context "for an experiment that the user does not participate in" do
before do
Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt")
end
it "should not raise an exception" do
expect { ab_finished(:not_started_experiment) }.not_to raise_exception
end
it "should not change the user state when reset is false" do
expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])
end
it "should not change the user state when reset is true" do
expect(self).not_to receive(:reset!)
ab_finished(:not_started_experiment)
end
it "should not increment the completed counter" do
ab_finished(:not_started_experiment)
expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0)
expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0)
end
end
end
context "finished with config" do
it "passes reset option" do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
}
}
alternative = ab_test(:my_experiment)
experiment = Split::ExperimentCatalog.find :my_experiment
ab_finished :my_experiment
expect(ab_user[experiment.key]).to eq(alternative)
expect(ab_user[experiment.finished_key]).to eq(true)
end
end
context "finished with metric name" do
before { Split.configuration.experiments = {} }
before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }
def should_finish_experiment(experiment_name, should_finish = true)
alts = Split.configuration.experiments[experiment_name][:alternatives]
experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)
alt_name = ab_user[experiment.key] = alts.first
alt = double("alternative")
expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)
expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)
if should_finish
expect(alt).to receive(:increment_completion).at_most(1).times
else
expect(alt).not_to receive(:increment_completion)
end
end
it "completes the test" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
metric: :my_metric
}
should_finish_experiment :my_experiment
ab_finished :my_metric
end
it "completes all relevant tests" do
Split.configuration.experiments = {
exp_1: {
alternatives: [ "1-1", "1-2" ],
metric: :my_metric
},
exp_2: {
alternatives: [ "2-1", "2-2" ],
metric: :another_metric
},
exp_3: {
alternatives: [ "3-1", "3-2" ],
metric: :my_metric
},
}
should_finish_experiment :exp_1
should_finish_experiment :exp_2, false
should_finish_experiment :exp_3
ab_finished :my_metric
end
it "passes reset option" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
resettable: false,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
it "passes through options" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric, reset: false
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
end
describe "conversions" do
it "should return a conversion rate for an alternative" do
alternative_name = ab_test("link_color", "blue", "red")
previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
expect(previous_convertion_rate).to eq(0.0)
ab_finished("link_color")
new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
expect(new_convertion_rate).to eq(1.0)
end
end
describe "active experiments" do
it "should show an active test" do
alternative = ab_test("def", "4", "5", "6")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show a finished test" do
alternative = ab_test("def", "4", "5", "6")
ab_finished("def", { reset: false })
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show an active test when an experiment is on a later version" do
experiment.reset
expect(experiment.version).to eq(1)
ab_test("link_color", "blue", "red")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "link_color"
end
it "should show versioned tests properly" do
10.times { experiment.reset }
alternative = ab_test(experiment.name, "blue", "red")
ab_finished(experiment.name, reset: false)
expect(experiment.version).to eq(10)
expect(active_experiments.count).to eq 1
expect(active_experiments).to eq({ "link_color" => alternative })
end
it "should show multiple tests" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
alternative = ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 2
expect(active_experiments["def"]).to eq alternative
expect(active_experiments["ghi"]).to eq another_alternative
end
it "should not show tests with winners" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6")
e.winner = "4"
ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "ghi"
expect(active_experiments.first[1]).to eq another_alternative
end
end
describe "when user is a robot" do
before(:each) do
@request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)")
end
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not create a experiment" do
ab_test("link_color", "blue", "red")
expect(Split::Experiment.new("link_color")).to be_a_new_record
names_and_weights.should == [['control_opt', 0.18], ['second_opt', 0.18], ['third_opt', 0.64]]
names_and_weights.inject(0){|sum, nw| sum + nw[1]}.should == 1.0
end
end
it 'should handle multiple experiments correctly' do
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when providing custom ignore logic" do
context "using a proc to configure custom logic" do
before(:each) do
Split.configure do |c|
c.ignore_filter = proc { |request| true } # ignore everything
end
end
it "ignores the ab_test" do
ab_test("link_color", "blue", "red")
red_count = Split::Alternative.new("red", "link_color").participant_count
blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((red_count + blue_count)).to be(0)
end
end
end
shared_examples_for "a disabled test" do
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when ip address is ignored" do
context "individually" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.130")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it_behaves_like "a disabled test"
end
context "for a range" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.129")
Split.configure do |c|
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "using both a range and a specific value" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.128")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "when ignored other address" do
before do
@request = OpenStruct.new(ip: "1.1.1.1")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it "works as usual" do
alternative_name = ab_test("link_color", "red", "blue")
expect {
ab_finished("link_color")
}.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1)
end
end
end
describe "when user is previewing" do
before(:each) do
@request = OpenStruct.new(headers: { "x-purpose" => "preview" })
end
it_behaves_like "a disabled test"
end
describe "versioned experiments" do
it "should use version zero if no version is present" do
alternative_name = ab_test("link_color", "blue", "red")
expect(experiment.version).to eq(0)
expect(ab_user["link_color"]).to eq(alternative_name)
end
it "should save the version of the experiment to the session" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
end
it "should load the experiment even if the version is not 0" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
return_alternative_name = ab_test("link_color", "blue", "red")
expect(return_alternative_name).to eq(alternative_name)
end
it "should reset the session of a user on an older version of the experiment" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
expect(new_alternative.participant_count).to eq(1)
end
it "should cleanup old versions of experiments from the session" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
end
it "should only count completion of users on the current version" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
Split::Alternative.new(alternative_name, "link_color")
experiment.reset
expect(experiment.version).to eq(1)
ab_finished("link_color")
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.completed_count).to eq(0)
end
end
context "when redis is not available" do
before(:each) do
expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)
end
context "and db_failover config option is turned off" do
before(:each) do
Split.configure do |config|
config.db_failover = false
end
end
describe "ab_test" do
it "should raise an exception" do
expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "finished" do
it "should raise an exception" do
expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "disable split testing" do
before(:each) do
Split.configure do |config|
config.enabled = false
end
end
it "should not attempt to connect to redis" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should return control variable" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect { ab_finished("link_color") }.not_to raise_error
end
end
end
context "and db_failover config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover = true
end
end
describe "ab_test" do
it "should not raise an exception" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_test("link_color", "blue", "red")
end
it "should always use first alternative" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/blue")
end
context "and db_failover_allow_parameter_override config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover_allow_parameter_override = true
end
end
context "and given an override parameter" do
it "should use given override instead of the first alternative" do
@params = { "ab_test" => { "link_color" => "red" } }
expect(ab_test("link_color", "blue", "red")).to eq("red")
expect(ab_test("link_color", "blue", "red", "green")).to eq("red")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/red")
end
end
end
context "and preloaded config given" do
before do
Split.configuration.experiments[:link_color] = {
alternatives: [ "blue", "red" ],
}
end
it "uses first alternative" do
expect(ab_test(:link_color)).to eq("blue")
end
end
end
describe "finished" do
it "should not raise an exception" do
expect { ab_finished("link_color") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_finished("link_color")
end
end
end
end
context "with preloaded config" do
before { Split.configuration.experiments = {} }
it "pulls options from config file" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
ab_test :my_experiment
expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
end
it "can be called multiple times" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
5.times { ab_test :my_experiment }
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(experiment.goals).to eq([ "goal1", "goal2" ])
expect(experiment.participant_count).to eq(1)
end
it "accepts multiple goals" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: [ "goal1", "goal2", "goal3" ]
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
end
it "allow specifying goals to be optional" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ]
}
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([])
end
it "accepts multiple alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "second_opt", "third_opt" ],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
end
it "accepts probability on alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 67 },
{ name: "second_opt", percent: 10 },
{ name: "third_opt", percent: 23 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
end
it "accepts probability on some alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 34 },
"second_opt",
{ name: "third_opt", percent: 23 },
"fourth_opt",
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "allows name param without probability" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt" },
"second_opt",
{ name: "third_opt", percent: 64 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "fails gracefully if config is missing experiment" do
Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
end
it "fails gracefully if config is missing" do
expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "fails gracefully if config is missing alternatives" do
Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
expect { ab_test :my_experiment }.to raise_error(NoMethodError)
end
end
it "should handle multiple experiments correctly" do
experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
ab_test("link_color", "blue", "red")
ab_test("link_color2", "blue", "red")
ab_finished("link_color2")
experiment2.alternatives.each do |alt|
expect(alt.unfinished_count).to eq(0)
end
end
context "with goals" do
before do
@experiment = { "link_color" => ["purchase", "refund"] }
@alternatives = ["blue", "red"]
@experiment_name, @goals = normalize_metric(@experiment)
@goal1 = @goals[0]
@goal2 = @goals[1]
end
it "should normalize experiment" do
expect(@experiment_name).to eq("link_color")
expect(@goals).to eq(["purchase", "refund"])
end
describe "ab_test" do
it "should allow experiment goals interface as a single hash" do
ab_test(@experiment, *@alternatives)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.goals).to eq(["purchase", "refund"])
end
end
describe "ab_finished" do
before do
@alternative_name = ab_test(@experiment, *@alternatives)
end
it "should increment the counter for the specified-goal completed alternative" do
expect { ab_finished({ "link_color" => ["purchase"] }) }
.to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
.and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
end
end
end
end
<MSG> Fail gracefully when config file is missing or bad
This is better than raising obscure exceptions from the internals.
<DFF> @@ -649,6 +649,21 @@ describe Split::Helper do
names_and_weights.should == [['control_opt', 0.18], ['second_opt', 0.18], ['third_opt', 0.64]]
names_and_weights.inject(0){|sum, nw| sum + nw[1]}.should == 1.0
end
+
+ it "fails gracefully if config is missing experiment" do
+ Split.configuration.experiments = { :other_experiment => { :foo => "Bar" } }
+ lambda { ab_test :my_experiment }.should raise_error(/not found/i)
+ end
+
+ it "fails gracefully if config is missing" do
+ Split.configuration.experiments = nil
+ lambda { ab_test :my_experiment }.should raise_error(/not found/i)
+ end
+
+ it "fails gracefully if config is missing variants" do
+ Split.configuration.experiments[:my_experiment] = { :foo => "Bar" }
+ lambda { ab_test :my_experiment }.should raise_error(/variants/i)
+ end
end
it 'should handle multiple experiments correctly' do
| 15 | Fail gracefully when config file is missing or bad | 0 | .rb | rb | mit | splitrb/split |
10071085 | <NME> helper_spec.rb
<BEF> # frozen_string_literal: true
require "spec_helper"
# TODO change some of these tests to use Rack::Test
describe Split::Helper do
include Split::Helper
let(:experiment) {
Split::ExperimentCatalog.find_or_create("link_color", "blue", "red")
}
describe "ab_test" do
it "should not raise an error when passed strings for alternatives" do
expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error
end
it "should not raise an error when passed an array for alternatives" do
expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error
end
it "should raise the appropriate error when passed integers for alternatives" do
expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError)
end
it "should raise the appropriate error when passed symbols for alternatives" do
expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError)
end
it "should not raise error when passed an array for goals" do
expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error
end
it "should not raise error when passed just one goal" do
expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error
end
it "raises an appropriate error when processing combined expirements" do
Split.configuration.experiments = {
combined_exp_1: {
alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ],
metric: :my_metric,
combined_experiments: [:combined_exp_1_sub_1]
}
}
Split::ExperimentCatalog.find_or_create("combined_exp_1")
expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do
ab_test("link_color", "blue", "red")
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should increment the participation counter after assignment to a new user" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)
end
it "should not increment the counter for an experiment that the user is not participating in" do
ab_test("link_color", "blue", "red")
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
# User shouldn't participate in this second experiment
ab_test("button_size", "small", "big")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an not started experiment" do
expect(Split.configuration).to receive(:start_manually).and_return(true)
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should return the given alternative for an existing user" do
expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red")
end
it "should always return the winner if one is present" do
experiment.winner = "orange"
expect(ab_test("link_color", "blue", "red")).to eq("orange")
end
it "should allow the alternative to be forced by passing it in the params" do
# ?ab_test[link_color]=blue
@params = { "ab_test" => { "link_color" => "blue" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
@params = { "ab_test" => { "link_color" => "red" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1)
expect(alternative).to eq("red")
end
it "should not allow an arbitrary alternative" do
@params = { "ab_test" => { "link_color" => "pink" } }
alternative = ab_test("link_color", "blue")
expect(alternative).to eq("blue")
end
it "should not store the split when a param forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do
@params = { "SPLIT_DISABLE" => "true" }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
alternative = ab_test("link_color", "red", "blue")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1)
expect(alternative).to eq("red")
end
it "should not store the split when Split generically disabled" do
@params = { "SPLIT_DISABLE" => "true" }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
context "when store_override is set" do
before { Split.configuration.store_override = true }
it "should store the forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).to receive(:[]=).with("link_color", "blue")
ab_test("link_color", "blue", "red")
end
end
context "when on_trial_choose is set" do
before { Split.configuration.on_trial_choose = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_test("link_color", "blue", "red")
end
end
it "should allow passing a block" do
alt = ab_test("link_color", "blue", "red")
ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" }
expect(ret).to eq("shared/#{alt}")
end
it "should allow the share of visitors see an alternative to be specified" do
ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should allow alternative weighting interface as a single hash" do
ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"])
expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])
end
it "should only let a user participate in one experiment at a time" do
link_color = ab_test("link_color", "blue", "red")
ab_test("button_size", "small", "big")
expect(ab_user["link_color"]).to eq(link_color)
big = Split::Alternative.new("big", "button_size")
expect(big.participant_count).to eq(0)
small = Split::Alternative.new("small", "button_size")
expect(small.participant_count).to eq(0)
end
it "should let a user participate in many experiment with allow_multiple_experiments option" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
link_color = ab_test("link_color", "blue", "red")
button_size = ab_test("button_size", "small", "big")
expect(ab_user["link_color"]).to eq(link_color)
expect(ab_user["button_size"]).to eq(button_size)
button_size_alt = Split::Alternative.new(button_size, "button_size")
expect(button_size_alt.participant_count).to eq(1)
end
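    # With allow_multiple_experiments = "control", a user may enter any number
    # of experiments, but at most one of them should serve a non-"control"
    # alternative; every other experiment returns its control.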
context "with allow_multiple_experiments = 'control'" do
it "should let a user participate in many experiment with one non-'control' alternative" do
Split.configure do |config|
config.allow_multiple_experiments = "control"
end
groups = 100.times.map do |n|
ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n })
end
experiments = ab_user.active_experiments
expect(experiments.size).to be > 1
count_control = experiments.values.count { |g| g == "control" }
expect(count_control).to eq(experiments.size - 1)
count_alts = groups.count { |g| g != "control" }
expect(count_alts).to eq(1)
end
context "when user already has experiment" do
let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) }
before do
Split.configure do |config|
config.allow_multiple_experiments = "control"
end
Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save
Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save
end
it "should restore previously selected alternative" do
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "should select the correct alternatives after experiment resets" do
experiment = Split::ExperimentCatalog.find(:test_0)
experiment.reset
mock_user[experiment.key] = "test-alt"
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "lets override existing choice" do
pending "this requires user store reset on first call not depending on whelther it is current trial"
@params = { "ab_test" => { "test_1" => "test-alt" } }
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control"
expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
end
end
end
it "should not over-write a finished key when an experiment is on a later version" do
experiment.increment_version
ab_user = { experiment.key => "blue", experiment.finished_key => true }
finished_session = ab_user.dup
ab_test("link_color", "blue", "red")
expect(ab_user).to eq(finished_session)
end
end
describe "metadata" do
context "is defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: { "one" => "Meta1", "two" => "Meta2" }
}
}
end
it "should be passed to helper block" do
@params = { "ab_test" => { "my_experiment" => "two" } }
expect(ab_test("my_experiment")).to eq "two"
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq("Meta2")
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment")).to eq "one"
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq("Meta1")
end
end
context "is not defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: nil
}
}
end
it "should be passed to helper block" do
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq({})
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq({})
end
end
end
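  # ab_finished records a completion for the user's assigned alternative and,
  # unless reset: false is passed, clears the user's participation so they can
  # be re-bucketed on the next ab_test call.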
describe "ab_finished" do
context "for an experiment that the user participates in" do
before(:each) do
@experiment_name = "link_color"
@alternatives = ["blue", "red"]
@experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
@alternative_name = ab_test(@experiment_name, *@alternatives)
@previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
end
it "should increment the counter for the completed alternative" do
ab_finished(@experiment_name)
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should set experiment's finished key if reset is false" do
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should not increment the counter if reset is false and the experiment has been already finished" do
2.times { ab_finished(@experiment_name, { reset: false }) }
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new(a, "button_size").completed_count }
end
it "should clear out the user's participation from their session" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
end
end
shared_examples_for "a disabled test" do
describe 'ab_test' do
before { Split.configuration.on_trial_complete = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_finished(@experiment_name)
end
it "should not call the method without alternative" do
ab_user[@experiment.key] = nil
expect(self).not_to receive(:some_method)
ab_finished(@experiment_name)
end
end
end
context "for an experiment that the user is excluded from" do
before do
alternative = ab_test("link_color", "blue", "red")
expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1)
alternative = ab_test("button_size", "small", "big")
expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0)
end
it "should not increment the completed counter" do
# So, user should be participating in the link_color experiment and
# receive the control for button_size. As the user is not participating in
# the button size experiment, finishing it should not increase the
# completion count for that alternative.
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new("small", "button_size").completed_count }
end
end
context "for an experiment that the user does not participate in" do
before do
Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt")
end
it "should not raise an exception" do
expect { ab_finished(:not_started_experiment) }.not_to raise_exception
end
it "should not change the user state when reset is false" do
expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])
end
it "should not change the user state when reset is true" do
expect(self).not_to receive(:reset!)
ab_finished(:not_started_experiment)
end
it "should not increment the completed counter" do
ab_finished(:not_started_experiment)
expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0)
expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0)
end
end
end
context "finished with config" do
it "passes reset option" do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
}
}
alternative = ab_test(:my_experiment)
experiment = Split::ExperimentCatalog.find :my_experiment
ab_finished :my_experiment
expect(ab_user[experiment.key]).to eq(alternative)
expect(ab_user[experiment.finished_key]).to eq(true)
end
end
context "finished with metric name" do
before { Split.configuration.experiments = {} }
before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }
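    # Test helper: assigns the first alternative for +experiment_name+ and sets
    # an expectation that finishing the associated metric does (or does not)
    # increment that alternative's completion count.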
def should_finish_experiment(experiment_name, should_finish = true)
alts = Split.configuration.experiments[experiment_name][:alternatives]
experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)
alt_name = ab_user[experiment.key] = alts.first
alt = double("alternative")
expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)
expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)
if should_finish
expect(alt).to receive(:increment_completion).at_most(1).times
else
expect(alt).not_to receive(:increment_completion)
end
end
it "completes the test" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
metric: :my_metric
}
should_finish_experiment :my_experiment
ab_finished :my_metric
end
it "completes all relevant tests" do
Split.configuration.experiments = {
exp_1: {
alternatives: [ "1-1", "1-2" ],
metric: :my_metric
},
exp_2: {
alternatives: [ "2-1", "2-2" ],
metric: :another_metric
},
exp_3: {
alternatives: [ "3-1", "3-2" ],
metric: :my_metric
},
}
should_finish_experiment :exp_1
should_finish_experiment :exp_2, false
should_finish_experiment :exp_3
ab_finished :my_metric
end
it "passes reset option" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
resettable: false,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
it "passes through options" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric, reset: false
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
end
describe "conversions" do
it "should return a conversion rate for an alternative" do
alternative_name = ab_test("link_color", "blue", "red")
      previous_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(previous_conversion_rate).to eq(0.0)
      ab_finished("link_color")
      new_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(new_conversion_rate).to eq(1.0)
end
end
describe "active experiments" do
it "should show an active test" do
alternative = ab_test("def", "4", "5", "6")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show a finished test" do
alternative = ab_test("def", "4", "5", "6")
ab_finished("def", { reset: false })
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show an active test when an experiment is on a later version" do
experiment.reset
expect(experiment.version).to eq(1)
ab_test("link_color", "blue", "red")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "link_color"
end
it "should show versioned tests properly" do
10.times { experiment.reset }
alternative = ab_test(experiment.name, "blue", "red")
ab_finished(experiment.name, reset: false)
expect(experiment.version).to eq(10)
expect(active_experiments.count).to eq 1
expect(active_experiments).to eq({ "link_color" => alternative })
end
it "should show multiple tests" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
alternative = ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 2
expect(active_experiments["def"]).to eq alternative
expect(active_experiments["ghi"]).to eq another_alternative
end
it "should not show tests with winners" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6")
e.winner = "4"
ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "ghi"
expect(active_experiments.first[1]).to eq another_alternative
end
end
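  # Requests whose user agent matches a known bot are excluded from testing:
  # they always receive the control and never affect participant or completion
  # counts.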
describe "when user is a robot" do
before(:each) do
@request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)")
end
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not create a experiment" do
ab_test("link_color", "blue", "red")
expect(Split::Experiment.new("link_color")).to be_a_new_record
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when providing custom ignore logic" do
context "using a proc to configure custom logic" do
before(:each) do
Split.configure do |c|
c.ignore_filter = proc { |request| true } # ignore everything
end
end
it "ignores the ab_test" do
ab_test("link_color", "blue", "red")
red_count = Split::Alternative.new("red", "link_color").participant_count
blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((red_count + blue_count)).to be(0)
end
end
end
shared_examples_for "a disabled test" do
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when ip address is ignored" do
context "individually" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.130")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it_behaves_like "a disabled test"
end
context "for a range" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.129")
Split.configure do |c|
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "using both a range and a specific value" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.128")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "when ignored other address" do
before do
@request = OpenStruct.new(ip: "1.1.1.1")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it "works as usual" do
alternative_name = ab_test("link_color", "red", "blue")
expect {
ab_finished("link_color")
}.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1)
end
end
end
describe "when user is previewing" do
before(:each) do
@request = OpenStruct.new(headers: { "x-purpose" => "preview" })
end
it_behaves_like "a disabled test"
end
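  # Resetting an experiment bumps its version, and a user's assignment is
  # stored under "<experiment>:<version>", so stale assignments from earlier
  # versions are discarded and the user is re-bucketed.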
describe "versioned experiments" do
it "should use version zero if no version is present" do
alternative_name = ab_test("link_color", "blue", "red")
expect(experiment.version).to eq(0)
expect(ab_user["link_color"]).to eq(alternative_name)
end
it "should save the version of the experiment to the session" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
end
it "should load the experiment even if the version is not 0" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
return_alternative_name = ab_test("link_color", "blue", "red")
expect(return_alternative_name).to eq(alternative_name)
end
it "should reset the session of a user on an older version of the experiment" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
expect(new_alternative.participant_count).to eq(1)
end
it "should cleanup old versions of experiments from the session" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
end
it "should only count completion of users on the current version" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
Split::Alternative.new(alternative_name, "link_color")
experiment.reset
expect(experiment.version).to eq(1)
ab_finished("link_color")
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.completed_count).to eq(0)
end
end
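  # db_failover governs behavior when Redis is unreachable: when off, the
  # connection error propagates; when on, ab_test silently falls back to the
  # first (control) alternative and db_failover_on_db_error is invoked.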
context "when redis is not available" do
before(:each) do
expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)
end
context "and db_failover config option is turned off" do
before(:each) do
Split.configure do |config|
config.db_failover = false
end
end
describe "ab_test" do
it "should raise an exception" do
expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "finished" do
it "should raise an exception" do
expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "disable split testing" do
before(:each) do
Split.configure do |config|
config.enabled = false
end
end
it "should not attempt to connect to redis" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should return control variable" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect { ab_finished("link_color") }.not_to raise_error
end
end
end
context "and db_failover config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover = true
end
end
describe "ab_test" do
it "should not raise an exception" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_test("link_color", "blue", "red")
end
it "should always use first alternative" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/blue")
end
context "and db_failover_allow_parameter_override config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover_allow_parameter_override = true
end
end
context "and given an override parameter" do
it "should use given override instead of the first alternative" do
@params = { "ab_test" => { "link_color" => "red" } }
expect(ab_test("link_color", "blue", "red")).to eq("red")
expect(ab_test("link_color", "blue", "red", "green")).to eq("red")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/red")
end
end
end
context "and preloaded config given" do
before do
Split.configuration.experiments[:link_color] = {
alternatives: [ "blue", "red" ],
}
end
it "uses first alternative" do
expect(ab_test(:link_color)).to eq("blue")
end
end
end
describe "finished" do
it "should not raise an exception" do
expect { ab_finished("link_color") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_finished("link_color")
end
end
end
end
context "with preloaded config" do
before { Split.configuration.experiments = {} }
it "pulls options from config file" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
ab_test :my_experiment
expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
end
it "can be called multiple times" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
5.times { ab_test :my_experiment }
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(experiment.goals).to eq([ "goal1", "goal2" ])
expect(experiment.participant_count).to eq(1)
end
it "accepts multiple goals" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: [ "goal1", "goal2", "goal3" ]
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
end
it "allow specifying goals to be optional" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ]
}
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([])
end
it "accepts multiple alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "second_opt", "third_opt" ],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
end
it "accepts probability on alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 67 },
{ name: "second_opt", percent: 10 },
{ name: "third_opt", percent: 23 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
end
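    # When only some alternatives carry an explicit percent, the remaining
    # probability mass is shared evenly by the rest: below, 1.0 - 0.34 - 0.23 =
    # 0.43 splits into 0.215 each for "second_opt" and "fourth_opt".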
it "accepts probability on some alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 34 },
"second_opt",
{ name: "third_opt", percent: 23 },
"fourth_opt",
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "allows name param without probability" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt" },
"second_opt",
{ name: "third_opt", percent: 64 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "fails gracefully if config is missing experiment" do
Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
end
it "fails gracefully if config is missing" do
expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "fails gracefully if config is missing alternatives" do
Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
expect { ab_test :my_experiment }.to raise_error(NoMethodError)
end
end
it "should handle multiple experiments correctly" do
experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
ab_test("link_color", "blue", "red")
ab_test("link_color2", "blue", "red")
ab_finished("link_color2")
experiment2.alternatives.each do |alt|
expect(alt.unfinished_count).to eq(0)
end
end
context "with goals" do
before do
@experiment = { "link_color" => ["purchase", "refund"] }
@alternatives = ["blue", "red"]
@experiment_name, @goals = normalize_metric(@experiment)
@goal1 = @goals[0]
@goal2 = @goals[1]
end
it "should normalize experiment" do
expect(@experiment_name).to eq("link_color")
expect(@goals).to eq(["purchase", "refund"])
end
describe "ab_test" do
it "should allow experiment goals interface as a single hash" do
ab_test(@experiment, *@alternatives)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.goals).to eq(["purchase", "refund"])
end
end
describe "ab_finished" do
before do
@alternative_name = ab_test(@experiment, *@alternatives)
end
it "should increment the counter for the specified-goal completed alternative" do
expect { ab_finished({ "link_color" => ["purchase"] }) }
.to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
.and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
end
end
end
end
<MSG> Merge pull request #148 from andrew/custom_exclude_logic
Custom exclude logic
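For context, the feature exercised below is configured through Split.configure; the user-agent matcher in this sketch is a made-up example, not part of the change.

# Any callable returning truthy for a request excludes that request from all
# experiments: it sees the control and is not counted.
Split.configure do |config|
  config.ignore_filter = proc { |request| request.user_agent =~ /Pingdom/ }
end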
<DFF> @@ -380,6 +380,26 @@ describe Split::Helper do
end
end
+
+ describe 'when providing custom ignore logic' do
+ context "using a proc to configure custom logic" do
+
+ before(:each) do
+ Split.configure do |c|
+ c.ignore_filter = proc{|request| !!"i_am_going_to_be_disabled" }
+ end
+ end
+
+ it "ignores the ab_test" do
+ ab_test('link_color', 'blue', 'red')
+
+ red_count = Split::Alternative.new('red', 'link_color').participant_count
+ blue_count = Split::Alternative.new('blue', 'link_color').participant_count
+ (red_count + blue_count).should be(0)
+ end
+ end
+ end
+
shared_examples_for "a disabled test" do
describe 'ab_test' do
| 20 | Merge pull request #148 from andrew/custom_exclude_logic | 0 | .rb | rb | mit | splitrb/split |
10071086 | <NME> helper_spec.rb
<BEF> # frozen_string_literal: true
require "spec_helper"
# TODO change some of these tests to use Rack::Test
describe Split::Helper do
include Split::Helper
let(:experiment) {
Split::ExperimentCatalog.find_or_create("link_color", "blue", "red")
}
describe "ab_test" do
it "should not raise an error when passed strings for alternatives" do
expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error
end
it "should not raise an error when passed an array for alternatives" do
expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error
end
it "should raise the appropriate error when passed integers for alternatives" do
expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError)
end
it "should raise the appropriate error when passed symbols for alternatives" do
expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError)
end
it "should not raise error when passed an array for goals" do
expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error
end
it "should not raise error when passed just one goal" do
expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error
end
it "raises an appropriate error when processing combined expirements" do
Split.configuration.experiments = {
combined_exp_1: {
alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ],
metric: :my_metric,
combined_experiments: [:combined_exp_1_sub_1]
}
}
Split::ExperimentCatalog.find_or_create("combined_exp_1")
expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do
ab_test("link_color", "blue", "red")
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should increment the participation counter after assignment to a new user" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)
end
it "should not increment the counter for an experiment that the user is not participating in" do
ab_test("link_color", "blue", "red")
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
# User shouldn't participate in this second experiment
ab_test("button_size", "small", "big")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an not started experiment" do
expect(Split.configuration).to receive(:start_manually).and_return(true)
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should return the given alternative for an existing user" do
expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red")
end
it "should always return the winner if one is present" do
experiment.winner = "orange"
expect(ab_test("link_color", "blue", "red")).to eq("orange")
end
it "should allow the alternative to be forced by passing it in the params" do
# ?ab_test[link_color]=blue
@params = { "ab_test" => { "link_color" => "blue" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
@params = { "ab_test" => { "link_color" => "red" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1)
expect(alternative).to eq("red")
end
it "should not allow an arbitrary alternative" do
@params = { "ab_test" => { "link_color" => "pink" } }
alternative = ab_test("link_color", "blue")
expect(alternative).to eq("blue")
end
it "should not store the split when a param forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do
@params = { "SPLIT_DISABLE" => "true" }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
alternative = ab_test("link_color", "red", "blue")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1)
expect(alternative).to eq("red")
end
it "should not store the split when Split generically disabled" do
@params = { "SPLIT_DISABLE" => "true" }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
context "when store_override is set" do
before { Split.configuration.store_override = true }
it "should store the forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).to receive(:[]=).with("link_color", "blue")
ab_test("link_color", "blue", "red")
end
end
context "when on_trial_choose is set" do
before { Split.configuration.on_trial_choose = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_test("link_color", "blue", "red")
end
end
it "should allow passing a block" do
alt = ab_test("link_color", "blue", "red")
ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" }
expect(ret).to eq("shared/#{alt}")
end
it "should allow the share of visitors see an alternative to be specified" do
ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should allow alternative weighting interface as a single hash" do
ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"])
expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])
end
it "should only let a user participate in one experiment at a time" do
link_color = ab_test("link_color", "blue", "red")
ab_test("button_size", "small", "big")
expect(ab_user["link_color"]).to eq(link_color)
big = Split::Alternative.new("big", "button_size")
expect(big.participant_count).to eq(0)
small = Split::Alternative.new("small", "button_size")
expect(small.participant_count).to eq(0)
end
it "should let a user participate in many experiment with allow_multiple_experiments option" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
link_color = ab_test("link_color", "blue", "red")
button_size = ab_test("button_size", "small", "big")
expect(ab_user["link_color"]).to eq(link_color)
expect(ab_user["button_size"]).to eq(button_size)
button_size_alt = Split::Alternative.new(button_size, "button_size")
expect(button_size_alt.participant_count).to eq(1)
end
context "with allow_multiple_experiments = 'control'" do
it "should let a user participate in many experiment with one non-'control' alternative" do
Split.configure do |config|
config.allow_multiple_experiments = "control"
end
groups = 100.times.map do |n|
ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n })
end
experiments = ab_user.active_experiments
expect(experiments.size).to be > 1
count_control = experiments.values.count { |g| g == "control" }
expect(count_control).to eq(experiments.size - 1)
count_alts = groups.count { |g| g != "control" }
expect(count_alts).to eq(1)
end
context "when user already has experiment" do
let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) }
before do
Split.configure do |config|
config.allow_multiple_experiments = "control"
end
Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save
Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save
end
it "should restore previously selected alternative" do
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "should select the correct alternatives after experiment resets" do
experiment = Split::ExperimentCatalog.find(:test_0)
experiment.reset
mock_user[experiment.key] = "test-alt"
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "lets override existing choice" do
pending "this requires user store reset on first call not depending on whelther it is current trial"
@params = { "ab_test" => { "test_1" => "test-alt" } }
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control"
expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
end
end
end
it "should not over-write a finished key when an experiment is on a later version" do
experiment.increment_version
ab_user = { experiment.key => "blue", experiment.finished_key => true }
finished_session = ab_user.dup
ab_test("link_color", "blue", "red")
expect(ab_user).to eq(finished_session)
end
end
describe "metadata" do
context "is defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: { "one" => "Meta1", "two" => "Meta2" }
}
}
end
it "should be passed to helper block" do
@params = { "ab_test" => { "my_experiment" => "two" } }
expect(ab_test("my_experiment")).to eq "two"
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq("Meta2")
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment")).to eq "one"
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq("Meta1")
end
end
context "is not defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: nil
}
}
end
it "should be passed to helper block" do
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq({})
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq({})
end
end
end
describe "ab_finished" do
context "for an experiment that the user participates in" do
before(:each) do
@experiment_name = "link_color"
@alternatives = ["blue", "red"]
@experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
@alternative_name = ab_test(@experiment_name, *@alternatives)
@previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
end
it "should increment the counter for the completed alternative" do
ab_finished(@experiment_name)
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should set experiment's finished key if reset is false" do
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should not increment the counter if reset is false and the experiment has been already finished" do
2.times { ab_finished(@experiment_name, { reset: false }) }
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new(a, "button_size").completed_count }
end
it "should clear out the user's participation from their session" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
end
end
shared_examples_for "a disabled test" do
describe 'ab_test' do
before { Split.configuration.on_trial_complete = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_finished(@experiment_name)
end
it "should not call the method without alternative" do
ab_user[@experiment.key] = nil
expect(self).not_to receive(:some_method)
ab_finished(@experiment_name)
end
end
end
context "for an experiment that the user is excluded from" do
before do
alternative = ab_test("link_color", "blue", "red")
expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1)
alternative = ab_test("button_size", "small", "big")
expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0)
end
it "should not increment the completed counter" do
# So, user should be participating in the link_color experiment and
# receive the control for button_size. As the user is not participating in
# the button size experiment, finishing it should not increase the
# completion count for that alternative.
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new("small", "button_size").completed_count }
end
end
context "for an experiment that the user does not participate in" do
before do
Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt")
end
it "should not raise an exception" do
expect { ab_finished(:not_started_experiment) }.not_to raise_exception
end
it "should not change the user state when reset is false" do
expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])
end
it "should not change the user state when reset is true" do
expect(self).not_to receive(:reset!)
ab_finished(:not_started_experiment)
end
it "should not increment the completed counter" do
ab_finished(:not_started_experiment)
expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0)
expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0)
end
end
end
context "finished with config" do
it "passes reset option" do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
}
}
alternative = ab_test(:my_experiment)
experiment = Split::ExperimentCatalog.find :my_experiment
ab_finished :my_experiment
expect(ab_user[experiment.key]).to eq(alternative)
expect(ab_user[experiment.finished_key]).to eq(true)
end
end
context "finished with metric name" do
before { Split.configuration.experiments = {} }
before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }
def should_finish_experiment(experiment_name, should_finish = true)
alts = Split.configuration.experiments[experiment_name][:alternatives]
experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)
alt_name = ab_user[experiment.key] = alts.first
alt = double("alternative")
expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)
expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)
if should_finish
expect(alt).to receive(:increment_completion).at_most(1).times
else
expect(alt).not_to receive(:increment_completion)
end
end
it "completes the test" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
metric: :my_metric
}
should_finish_experiment :my_experiment
ab_finished :my_metric
end
it "completes all relevant tests" do
Split.configuration.experiments = {
exp_1: {
alternatives: [ "1-1", "1-2" ],
metric: :my_metric
},
exp_2: {
alternatives: [ "2-1", "2-2" ],
metric: :another_metric
},
exp_3: {
alternatives: [ "3-1", "3-2" ],
metric: :my_metric
},
}
should_finish_experiment :exp_1
should_finish_experiment :exp_2, false
should_finish_experiment :exp_3
ab_finished :my_metric
end
it "passes reset option" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
resettable: false,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
it "passes through options" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric, reset: false
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
end
describe "conversions" do
it "should return a conversion rate for an alternative" do
alternative_name = ab_test("link_color", "blue", "red")
      previous_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(previous_conversion_rate).to eq(0.0)
      ab_finished("link_color")
      new_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(new_conversion_rate).to eq(1.0)
end
end
describe "active experiments" do
it "should show an active test" do
alternative = ab_test("def", "4", "5", "6")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show a finished test" do
alternative = ab_test("def", "4", "5", "6")
ab_finished("def", { reset: false })
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show an active test when an experiment is on a later version" do
experiment.reset
expect(experiment.version).to eq(1)
ab_test("link_color", "blue", "red")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "link_color"
end
it "should show versioned tests properly" do
10.times { experiment.reset }
alternative = ab_test(experiment.name, "blue", "red")
ab_finished(experiment.name, reset: false)
expect(experiment.version).to eq(10)
expect(active_experiments.count).to eq 1
expect(active_experiments).to eq({ "link_color" => alternative })
end
it "should show multiple tests" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
alternative = ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 2
expect(active_experiments["def"]).to eq alternative
expect(active_experiments["ghi"]).to eq another_alternative
end
it "should not show tests with winners" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6")
e.winner = "4"
ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "ghi"
expect(active_experiments.first[1]).to eq another_alternative
end
end
describe "when user is a robot" do
before(:each) do
@request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)")
end
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not create a experiment" do
ab_test("link_color", "blue", "red")
expect(Split::Experiment.new("link_color")).to be_a_new_record
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when providing custom ignore logic" do
context "using a proc to configure custom logic" do
before(:each) do
Split.configure do |c|
c.ignore_filter = proc { |request| true } # ignore everything
end
end
it "ignores the ab_test" do
ab_test("link_color", "blue", "red")
red_count = Split::Alternative.new("red", "link_color").participant_count
blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((red_count + blue_count)).to be(0)
end
end
end
shared_examples_for "a disabled test" do
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
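  # ignore_ip_addresses accepts exact strings as well as regexps; a request
  # from a matching IP behaves like "a disabled test": control only, nothing
  # counted.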
describe "when ip address is ignored" do
context "individually" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.130")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it_behaves_like "a disabled test"
end
context "for a range" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.129")
Split.configure do |c|
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "using both a range and a specific value" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.128")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "when ignored other address" do
before do
@request = OpenStruct.new(ip: "1.1.1.1")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it "works as usual" do
alternative_name = ab_test("link_color", "red", "blue")
expect {
ab_finished("link_color")
}.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1)
end
end
end
describe "when user is previewing" do
before(:each) do
@request = OpenStruct.new(headers: { "x-purpose" => "preview" })
end
it_behaves_like "a disabled test"
end
describe "versioned experiments" do
it "should use version zero if no version is present" do
alternative_name = ab_test("link_color", "blue", "red")
expect(experiment.version).to eq(0)
expect(ab_user["link_color"]).to eq(alternative_name)
end
it "should save the version of the experiment to the session" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
end
it "should load the experiment even if the version is not 0" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
return_alternative_name = ab_test("link_color", "blue", "red")
expect(return_alternative_name).to eq(alternative_name)
end
it "should reset the session of a user on an older version of the experiment" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
expect(new_alternative.participant_count).to eq(1)
end
it "should cleanup old versions of experiments from the session" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
end
it "should only count completion of users on the current version" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
Split::Alternative.new(alternative_name, "link_color")
experiment.reset
expect(experiment.version).to eq(1)
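      # The completion below is recorded against version 1, so the alternative
      # chosen under version 0 keeps a completed_count of zero.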
ab_finished("link_color")
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.completed_count).to eq(0)
end
end
context "when redis is not available" do
before(:each) do
expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)
end
context "and db_failover config option is turned off" do
before(:each) do
Split.configure do |config|
config.db_failover = false
end
end
describe "ab_test" do
it "should raise an exception" do
expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "finished" do
it "should raise an exception" do
expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "disable split testing" do
before(:each) do
Split.configure do |config|
config.enabled = false
end
end
it "should not attempt to connect to redis" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should return control variable" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect { ab_finished("link_color") }.not_to raise_error
end
end
end
context "and db_failover config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover = true
end
end
describe "ab_test" do
it "should not raise an exception" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_test("link_color", "blue", "red")
end
it "should always use first alternative" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/blue")
end
context "and db_failover_allow_parameter_override config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover_allow_parameter_override = true
end
end
context "and given an override parameter" do
it "should use given override instead of the first alternative" do
@params = { "ab_test" => { "link_color" => "red" } }
expect(ab_test("link_color", "blue", "red")).to eq("red")
expect(ab_test("link_color", "blue", "red", "green")).to eq("red")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/red")
end
end
end
context "and preloaded config given" do
before do
Split.configuration.experiments[:link_color] = {
alternatives: [ "blue", "red" ],
}
end
it "uses first alternative" do
expect(ab_test(:link_color)).to eq("blue")
end
end
end
describe "finished" do
it "should not raise an exception" do
expect { ab_finished("link_color") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_finished("link_color")
end
end
end
end
context "with preloaded config" do
before { Split.configuration.experiments = {} }
it "pulls options from config file" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
ab_test :my_experiment
expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
end
it "can be called multiple times" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
5.times { ab_test :my_experiment }
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(experiment.goals).to eq([ "goal1", "goal2" ])
expect(experiment.participant_count).to eq(1)
end
it "accepts multiple goals" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: [ "goal1", "goal2", "goal3" ]
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
end
it "allow specifying goals to be optional" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ]
}
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([])
end
it "accepts multiple alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "second_opt", "third_opt" ],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
end
it "accepts probability on alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 67 },
{ name: "second_opt", percent: 10 },
{ name: "third_opt", percent: 23 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
end
it "accepts probability on some alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 34 },
"second_opt",
{ name: "third_opt", percent: 23 },
"fourth_opt",
],
}
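      # Alternatives without an explicit percent split the remaining weight
      # evenly: (1.0 - 0.34 - 0.23) / 2 = 0.215 each.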
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "allows name param without probability" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt" },
"second_opt",
{ name: "third_opt", percent: 64 },
],
}
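      # Only third_opt carries a weight, so the other two share the remainder:
      # (1.0 - 0.64) / 2 = 0.18 each.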
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "fails gracefully if config is missing experiment" do
Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
end
it "fails gracefully if config is missing" do
expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "fails gracefully if config is missing alternatives" do
Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
expect { ab_test :my_experiment }.to raise_error(NoMethodError)
end
end
it "should handle multiple experiments correctly" do
experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
ab_test("link_color", "blue", "red")
ab_test("link_color2", "blue", "red")
ab_finished("link_color2")
experiment2.alternatives.each do |alt|
expect(alt.unfinished_count).to eq(0)
end
end
context "with goals" do
before do
@experiment = { "link_color" => ["purchase", "refund"] }
@alternatives = ["blue", "red"]
@experiment_name, @goals = normalize_metric(@experiment)
@goal1 = @goals[0]
@goal2 = @goals[1]
end
it "should normalize experiment" do
expect(@experiment_name).to eq("link_color")
expect(@goals).to eq(["purchase", "refund"])
end
describe "ab_test" do
it "should allow experiment goals interface as a single hash" do
ab_test(@experiment, *@alternatives)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.goals).to eq(["purchase", "refund"])
end
end
describe "ab_finished" do
before do
@alternative_name = ab_test(@experiment, *@alternatives)
end
it "should increment the counter for the specified-goal completed alternative" do
expect { ab_finished({ "link_color" => ["purchase"] }) }
.to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
.and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
end
end
end
end
10071090 | <NME> 5.1.gemfile
<BEF> ADDFILE
<MSG> Test on rails 5.1 as well
<DFF> @@ -0,0 +1,10 @@
+# This file was generated by Appraisal
+
+source "https://rubygems.org"
+
+gem "appraisal"
+gem "codeclimate-test-reporter"
+gem "rails", "~> 5.1"
+gem "sinatra", git: "https://github.com/sinatra/sinatra"
+
+gemspec path: "../"
| 10 | Test on rails 5.1 as well | 0 | .gemfile | 1 | mit | splitrb/split |
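Appraisal generates gemfiles like the one above from an `Appraisals` definition at the project root. A minimal sketch of such a definition follows; the actual `Appraisals` file is not part of this record, so its exact contents are an assumption:

    # Appraisals (hypothetical): `bundle exec appraisal install` expands each
    # `appraise` block into a standalone gemfile such as gemfiles/5.1.gemfile.
    appraise "5.1" do
      gem "rails", "~> 5.1"
      gem "sinatra", git: "https://github.com/sinatra/sinatra"
    end

The suite can then be run against that dependency set with `bundle exec appraisal 5.1 rspec`.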
10071093 | <NME> redis_adapter_spec.rb
<BEF> ADDFILE
<MSG> Merge pull request #203 from enova/redis-persist
Persistence within Redis directly
<DFF> @@ -0,0 +1,81 @@
+require "spec_helper"
+
+describe Split::Persistence::RedisAdapter do
+
+ let(:context) { double(:lookup => 'blah') }
+
+ subject { Split::Persistence::RedisAdapter.new(context) }
+
+ describe '#redis_key' do
+ before { Split::Persistence::RedisAdapter.reset_config! }
+
+ context 'default' do
+ it 'should raise error with prompt to set lookup_by' do
+ expect{Split::Persistence::RedisAdapter.new(context)
+ }.to raise_error
+ end
+ end
+
+ context 'config with lookup_by = proc { "block" }' do
+ before { Split::Persistence::RedisAdapter.with_config(:lookup_by => proc{'block'}) }
+
+ it 'should be "persistence:block"' do
+ subject.redis_key.should == 'persistence:block'
+ end
+ end
+
+ context 'config with lookup_by = proc { |context| context.test }' do
+ before { Split::Persistence::RedisAdapter.with_config(:lookup_by => proc{'block'}) }
+ let(:context) { double(:test => 'block') }
+
+ it 'should be "persistence:block"' do
+ subject.redis_key.should == 'persistence:block'
+ end
+ end
+
+ context 'config with lookup_by = "method_name"' do
+ before { Split::Persistence::RedisAdapter.with_config(:lookup_by => 'method_name') }
+ let(:context) { double(:method_name => 'val') }
+
+ it 'should be "persistence:bar"' do
+ subject.redis_key.should == 'persistence:val'
+ end
+ end
+
+ context 'config with namespace and lookup_by' do
+ before { Split::Persistence::RedisAdapter.with_config(:lookup_by => proc{'frag'}, :namespace => 'namer') }
+
+ it 'should be "namer"' do
+ subject.redis_key.should == 'namer:frag'
+ end
+ end
+ end
+
+ context 'functional tests' do
+ before { Split::Persistence::RedisAdapter.with_config(:lookup_by => 'lookup') }
+
+ describe "#[] and #[]=" do
+ it "should set and return the value for given key" do
+ subject["my_key"] = "my_value"
+ subject["my_key"].should eq("my_value")
+ end
+ end
+
+ describe "#delete" do
+ it "should delete the given key" do
+ subject["my_key"] = "my_value"
+ subject.delete("my_key")
+ subject["my_key"].should be_nil
+ end
+ end
+
+ describe "#keys" do
+ it "should return an array of the user's stored keys" do
+ subject["my_key"] = "my_value"
+ subject["my_second_key"] = "my_second_value"
+ subject.keys.should =~ ["my_key", "my_second_key"]
+ end
+ end
+
+ end
+end
| 81 | Merge pull request #203 from enova/redis-persist | 0 | .rb | rb | mit | splitrb/split |
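For context, the adapter added by this diff is wired into Split through its configuration block. A minimal sketch, assuming a `current_user_id` accessor on the context object (that accessor is illustrative, not part of the diff):

    # Sketch: Redis-backed per-user persistence for Split.
    Split.configure do |config|
      config.persistence = Split::Persistence::RedisAdapter.with_config(
        lookup_by: proc { |context| context.current_user_id }, # hypothetical accessor
        namespace: "persistence" # optional key prefix, as exercised in the specs above
      )
    end

Keys then take the form `persistence:<lookup value>`, matching the `redis_key` expectations in the spec.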
10071096 | <NME> redis_adapter_spec.rb
<BEF> # frozen_string_literal: true
describe Split::Persistence::RedisAdapter do
let(:context) { nil }
subject { Split::Persistence::RedisAdapter.new(context) }
before { Split::Persistence::RedisAdapter.reset_config! }
context 'default' do
it 'should be "persistence"' do
subject.redis_key.should == 'persistence'
end
end
context 'config with namespace' do
before { Split::Persistence::RedisAdapter.with_config(:namespace => 'namer') }
it 'should be "namer"' do
subject.redis_key.should == 'namer'
end
end
context "config with lookup_by = proc { |context| context.test }" do
before { Split::Persistence::RedisAdapter.with_config(lookup_by: proc { "block" }) }
let(:context) { double(test: "block") }
it 'should be "persistence:block"' do
expect(subject.redis_key).to eq("persistence:block")
end
end
context 'config with lookup_by = proc { "block" }' do
before { Split::Persistence::RedisAdapter.with_config(:lookup_by => proc{'block'}) }
it 'should be "persistence:block"' do
subject.redis_key.should == 'persistence:block'
end
end
end
describe "#[] and #[]=" do
it "should set and return the value for given key" do
subject["my_key"] = "my_value"
subject["my_key"].should eq("my_value")
end
end
describe "#delete" do
it "should delete the given key" do
subject["my_key"] = "my_value"
subject.delete("my_key")
subject["my_key"].should be_nil
end
end
describe "#keys" do
it "should return an array of the user's stored keys" do
subject["my_key"] = "my_value"
subject["my_second_key"] = "my_second_value"
subject.keys.should =~ ["my_key", "my_second_key"]
end
end
end
describe "#delete" do
it "should delete the given key" do
subject["my_key"] = "my_value"
subject.delete("my_key")
expect(subject["my_key"]).to be_nil
end
end
describe "#keys" do
it "should return an array of the user's stored keys" do
subject["my_key"] = "my_value"
subject["my_second_key"] = "my_second_value"
expect(subject.keys).to match(["my_key", "my_second_key"])
end
end
end
end
<MSG> Changed config to throw error when lookup_by is undefined
<DFF> @@ -2,7 +2,7 @@ require "spec_helper"
describe Split::Persistence::RedisAdapter do
- let(:context) { nil }
+ let(:context) { double(:lookup => 'blah') }
subject { Split::Persistence::RedisAdapter.new(context) }
@@ -10,16 +10,26 @@ describe Split::Persistence::RedisAdapter do
before { Split::Persistence::RedisAdapter.reset_config! }
context 'default' do
- it 'should be "persistence"' do
- subject.redis_key.should == 'persistence'
+ it 'should raise error with prompt to set lookup_by' do
+ expect{Split::Persistence::RedisAdapter.new(context)
+ }.to raise_error
end
end
- context 'config with namespace' do
- before { Split::Persistence::RedisAdapter.with_config(:namespace => 'namer') }
+ context 'config with lookup_by = proc { "block" }' do
+ before { Split::Persistence::RedisAdapter.with_config(:lookup_by => proc{'block'}) }
- it 'should be "namer"' do
- subject.redis_key.should == 'namer'
+ it 'should be "persistence:block"' do
+ subject.redis_key.should == 'persistence:block'
+ end
+ end
+
+ context 'config with lookup_by = proc { |context| context.test }' do
+ before { Split::Persistence::RedisAdapter.with_config(:lookup_by => proc{'block'}) }
+ let(:context) { double(test: 'block') }
+
+ it 'should be "persistence:block"' do
+ subject.redis_key.should == 'persistence:block'
end
end
@@ -32,36 +42,40 @@ describe Split::Persistence::RedisAdapter do
end
end
- context 'config with lookup_by = proc { "block" }' do
- before { Split::Persistence::RedisAdapter.with_config(:lookup_by => proc{'block'}) }
+ context 'config with namespace and lookup_by' do
+ before { Split::Persistence::RedisAdapter.with_config(:lookup_by => proc{'frag'}, :namespace => 'namer') }
- it 'should be "persistence:block"' do
- subject.redis_key.should == 'persistence:block'
+ it 'should be "namer"' do
+ subject.redis_key.should == 'namer:frag'
end
end
end
- describe "#[] and #[]=" do
- it "should set and return the value for given key" do
- subject["my_key"] = "my_value"
- subject["my_key"].should eq("my_value")
+ context 'functional tests' do
+ before { Split::Persistence::RedisAdapter.with_config(:lookup_by => 'lookup') }
+
+ describe "#[] and #[]=" do
+ it "should set and return the value for given key" do
+ subject["my_key"] = "my_value"
+ subject["my_key"].should eq("my_value")
+ end
end
- end
- describe "#delete" do
- it "should delete the given key" do
- subject["my_key"] = "my_value"
- subject.delete("my_key")
- subject["my_key"].should be_nil
+ describe "#delete" do
+ it "should delete the given key" do
+ subject["my_key"] = "my_value"
+ subject.delete("my_key")
+ subject["my_key"].should be_nil
+ end
end
- end
- describe "#keys" do
- it "should return an array of the user's stored keys" do
- subject["my_key"] = "my_value"
- subject["my_second_key"] = "my_second_value"
- subject.keys.should =~ ["my_key", "my_second_key"]
+ describe "#keys" do
+ it "should return an array of the user's stored keys" do
+ subject["my_key"] = "my_value"
+ subject["my_second_key"] = "my_second_value"
+ subject.keys.should =~ ["my_key", "my_second_key"]
+ end
end
- end
+ end
end
| 42 | Changed config to throw error when lookup_by is undefined | 28 | .rb | rb | mit | splitrb/split |
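The behavioral change in this commit is that `lookup_by` becomes mandatory: constructing the adapter without it now raises, instead of silently storing every user under one shared key. A small sketch of that failure mode, assuming the error is a StandardError subclass (the spec only asserts that something is raised):

    # Sketch: an unconfigured adapter now fails fast at construction time.
    Split::Persistence::RedisAdapter.reset_config!
    context = Object.new # any context object; the missing piece is lookup_by
    begin
      Split::Persistence::RedisAdapter.new(context)
    rescue => error
      puts "RedisAdapter needs lookup_by: #{error.message}"
    end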
10071099 | <NME> models.py
<BEF> import os

from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User

OS_NAMES = (
    ("aix", "AIX"),
    ("beos", "BeOS"),
    ("debian", "Debian Linux"),
    ("dos", "DOS"),
    ("freebsd", "FreeBSD"),
    ("hpux", "HP/UX"),
    ("mac", "Mac System x."),
    ("macos", "MacOS X"),
    ("mandrake", "Mandrake Linux"),
    ("netbsd", "NetBSD"),
    ("openbsd", "OpenBSD"),
    ("qnx", "QNX"),
    ("redhat", "RedHat Linux"),
    ("solaris", "SUN Solaris"),
    ("suse", "SuSE Linux"),
    ("yellowdog", "Yellow Dog Linux"),
)

ARCHITECTURES = (
    ("alpha", "Alpha"),
    ("hppa", "HPPA"),
    ("ix86", "Intel"),
    ("powerpc", "PowerPC"),
    ("ultrasparc", "UltraSparc"),
)


class Classifier(models.Model):
    name = models.CharField(max_length=255, unique=True)

    class Meta:
        verbose_name = _(u"classifier")
        verbose_name_plural = _(u"classifiers")

    def __unicode__(self):
        return self.name


class Project(models.Model):
    name = models.CharField(max_length=255, unique=True)
    license = models.TextField(blank=True)
    metadata_version = models.CharField(max_length=64, default=1.0)
    author = models.CharField(max_length=128, blank=True)
    home_page = models.URLField(verify_exists=False, blank=True, null=True)
    download_url = models.CharField(max_length=200, blank=True, null=True)
    summary = models.TextField(blank=True)
    description = models.TextField(blank=True)
    author_email = models.CharField(max_length=255, blank=True)
    classifiers = models.ManyToManyField(Classifier)
    owner = models.ForeignKey(User, related_name="projects")
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = _(u"project")
        verbose_name_plural = _(u"projects")

    def __unicode__(self):
        return self.name

    @models.permalink
    def get_absolute_url(self):
        return ('djangopypi-show_links', (), {'dist_name': self.name})

    @models.permalink
    def get_pypi_absolute_url(self):
        return ('djangopypi-pypi_show_links', (), {'dist_name': self.name})

    def get_release(self, version):
        """Return the release object for version, or None"""
        try:
            return self.releases.get(version=version)
        except Release.DoesNotExist:
            return None


class Release(models.Model):
    version = models.CharField(max_length=128)
    distribution = models.FileField(upload_to="dists")
    md5_digest = models.CharField(max_length=255, blank=True)
    platform = models.CharField(max_length=255, blank=True)
    signature = models.CharField(max_length=128, blank=True)
    filetype = models.CharField(max_length=255, blank=True)
    pyversion = models.CharField(max_length=32, blank=True)
    project = models.ForeignKey(Project, related_name="releases")
    upload_time = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = _(u"release")
        verbose_name_plural = _(u"releases")
        unique_together = ("project", "version", "platform", "distribution", "pyversion")

    def __unicode__(self):
        return u"%s (%s)" % (self.release_name, self.platform)

    @property
    def filename(self):
        return os.path.basename(self.distribution.name)

    @property
    def release_name(self):
        return u"%s-%s" % (self.project.name, self.version)

    @property
    def path(self):
        return self.distribution.name

    @models.permalink
    def get_absolute_url(self):
        return ('djangopypi-show_version', (), {'dist_name': self.project, 'version': self.version})

    def get_dl_url(self):
        return "%s#md5=%s" % (self.distribution.url, self.md5_digest)
<MSG> complete refactor to use ModelForm
<DFF> @@ -31,6 +31,7 @@ POSSIBILITY OF SUCH DAMAGE.
"""
import os
+from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
@@ -63,6 +64,8 @@ ARCHITECTURES = (
("ultrasparc", "UltraSparc"),
)
+UPLOAD_TO = getattr(settings,
+ "DJANGOPYPI_RELEASE_UPLOAD_TO", 'dist')
class Classifier(models.Model):
name = models.CharField(max_length=255, unique=True)
@@ -107,7 +110,7 @@ class Project(models.Model):
class Release(models.Model):
version = models.CharField(max_length=128)
- distribution = models.FileField(upload_to="dists")
+ distribution = models.FileField(upload_to=UPLOAD_TO)
md5_digest = models.CharField(max_length=255, blank=True)
platform = models.CharField(max_length=255, blank=True)
signature = models.CharField(max_length=128, blank=True)
| 4 | complete refactor to use ModelForm | 1 | .py | py | bsd-3-clause | ask/chishop |
10071100 | <NME> dashboard_spec.rb
<BEF> # frozen_string_literal: true
require "spec_helper"
require "rack/test"
require "split/dashboard"
describe Split::Dashboard do
include Rack::Test::Methods
class TestDashboard < Split::Dashboard
include Split::Helper
get "/my_experiment" do
ab_test(params[:experiment], "blue", "red")
end
end
def app
@app ||= TestDashboard
end
def link(color)
Split::Alternative.new(color, experiment.name)
end
let(:experiment) {
Split::ExperimentCatalog.find_or_create("link_color", "blue", "red")
}
let(:experiment_with_goals) {
Split::ExperimentCatalog.find_or_create({ "link_color" => ["goal_1", "goal_2"] }, "blue", "red")
}
let(:metric) {
Split::Metric.find_or_create(name: "testmetric", experiments: [experiment, experiment_with_goals])
}
let(:red_link) { link("red") }
let(:blue_link) { link("blue") }
before(:each) do
Split.configuration.beta_probability_simulations = 1
end
it "should respond to /" do
get "/"
expect(last_response).to be_ok
end
context "start experiment manually" do
before do
Split.configuration.start_manually = true
end
context "experiment without goals" do
it "should display a Start button" do
experiment
get "/"
expect(last_response.body).to include("Start")
post "/start?experiment=#{experiment.name}"
get "/"
expect(last_response.body).to include("Reset Data")
expect(last_response.body).not_to include("Metrics:")
end
end
context "experiment with metrics" do
it "should display the names of associated metrics" do
metric
get "/"
expect(last_response.body).to include("Metrics:testmetric")
end
end
context "with goals" do
it "should display a Start button" do
experiment_with_goals
get "/"
expect(last_response.body).to include("Start")
post "/start?experiment=#{experiment.name}"
get "/"
expect(last_response.body).to include("Reset Data")
end
end
end
describe "force alternative" do
context "initial version" do
let!(:user) do
Split::User.new(@app, { experiment.name => "red" })
end
before do
allow(Split::User).to receive(:new).and_return(user)
end
it "should set current user's alternative" do
blue_link.participant_count = 7
post "/force_alternative?experiment=#{experiment.name}", alternative: "blue"
get "/my_experiment?experiment=#{experiment.name}"
expect(last_response.body).to include("blue")
end
it "should not modify an existing user" do
blue_link.participant_count = 7
post "/force_alternative?experiment=#{experiment.name}", alternative: "blue"
expect(user[experiment.key]).to eq("red")
expect(blue_link.participant_count).to eq(7)
end
end
context "incremented version" do
let!(:user) do
experiment.increment_version
Split::User.new(@app, { "#{experiment.name}:#{experiment.version}" => "red" })
end
before do
allow(Split::User).to receive(:new).and_return(user)
end
it "should set current user's alternative" do
blue_link.participant_count = 7
post "/force_alternative?experiment=#{experiment.name}", alternative: "blue"
get "/my_experiment?experiment=#{experiment.name}"
expect(last_response.body).to include("blue")
end
end
end
describe "index page" do
context "with winner" do
before { experiment.winner = "red" }
it "displays `Reopen Experiment` button" do
get "/"
expect(last_response.body).to include("Reopen Experiment")
end
end
context "without winner" do
it "should not display `Reopen Experiment` button" do
get "/"
expect(last_response.body).to_not include("Reopen Experiment")
end
end
end
describe "reopen experiment" do
before { experiment.winner = "red" }
it "redirects" do
post "/reopen?experiment=#{experiment.name}"
expect(last_response).to be_redirect
end
it "removes winner" do
post "/reopen?experiment=#{experiment.name}"
expect(Split::ExperimentCatalog.find(experiment.name)).to_not have_winner
end
it "keeps existing stats" do
red_link.participant_count = 5
blue_link.participant_count = 7
experiment.winner = "blue"
post "/reopen?experiment=#{experiment.name}"
expect(red_link.participant_count).to eq(5)
expect(blue_link.participant_count).to eq(7)
end
end
describe "update cohorting" do
it "calls enable of cohorting when action is enable" do
post "/update_cohorting?experiment=#{experiment.name}", { "cohorting_action": "enable" }
expect(experiment.cohorting_disabled?).to eq false
end
it "calls disable of cohorting when action is disable" do
post "/update_cohorting?experiment=#{experiment.name}", { "cohorting_action": "disable" }
expect(experiment.cohorting_disabled?).to eq true
end
it "calls neither enable or disable cohorting when passed invalid action" do
previous_value = experiment.cohorting_disabled?
post "/update_cohorting?experiment=#{experiment.name}", { "cohorting_action": "other" }
expect(experiment.cohorting_disabled?).to eq previous_value
end
end
describe "initialize experiment" do
before do
Split.configuration.experiments = {
:my_experiment => {
:alternatives => [ "control", "alternative" ],
}
}
end
it "initializes the experiment when the experiment is given" do
expect(Split::ExperimentCatalog.find("my_experiment")).to be nil
post "/initialize_experiment", { experiment: "my_experiment"}
experiment = Split::ExperimentCatalog.find("my_experiment")
expect(experiment).to be_a(Split::Experiment)
end
it "does not attempt to intialize the experiment when empty experiment is given" do
post "/initialize_experiment", { experiment: ""}
expect(Split::ExperimentCatalog).to_not receive(:find_or_create)
end
it "does not attempt to intialize the experiment when no experiment is given" do
post "/initialize_experiment"
expect(Split::ExperimentCatalog).to_not receive(:find_or_create)
end
end
it "should reset an experiment" do
red_link.participant_count = 5
blue_link.participant_count = 7
experiment.winner = "blue"
post "/reset?experiment=#{experiment.name}"
expect(last_response).to be_redirect
new_red_count = red_link.participant_count
new_blue_count = blue_link.participant_count
expect(new_blue_count).to eq(0)
expect(new_red_count).to eq(0)
expect(experiment.winner).to be_nil
end
it "should delete an experiment" do
delete "/experiment?experiment=#{experiment.name}"
expect(last_response).to be_redirect
expect(Split::ExperimentCatalog.find(experiment.name)).to be_nil
end
it "should mark an alternative as the winner" do
expect(experiment.winner).to be_nil
post "/experiment?experiment=#{experiment.name}", alternative: "red"
expect(last_response).to be_redirect
expect(experiment.winner.name).to eq("red")
end
it "should display the start date" do
experiment.start
get "/"
expect(last_response.body).to include("<small>#{experiment.start_time.strftime('%Y-%m-%d')}</small>")
end
it "should handle experiments without a start date" do
Split.redis.hdel(:experiment_start_times, experiment.name)
get "/"
expect(last_response.body).to include("<small>Unknown</small>")
end
end
<MSG> Fix remaining rubocop offenses
<DFF> @@ -203,10 +203,10 @@ describe Split::Dashboard do
end
describe "initialize experiment" do
- before do
+ before do
Split.configuration.experiments = {
- :my_experiment => {
- :alternatives => [ "control", "alternative" ],
+ my_experiment: {
+ alternatives: [ "control", "alternative" ],
}
}
end
@@ -214,14 +214,14 @@ describe Split::Dashboard do
it "initializes the experiment when the experiment is given" do
expect(Split::ExperimentCatalog.find("my_experiment")).to be nil
- post "/initialize_experiment", { experiment: "my_experiment"}
+ post "/initialize_experiment", { experiment: "my_experiment" }
experiment = Split::ExperimentCatalog.find("my_experiment")
expect(experiment).to be_a(Split::Experiment)
end
it "does not attempt to intialize the experiment when empty experiment is given" do
- post "/initialize_experiment", { experiment: ""}
+ post "/initialize_experiment", { experiment: "" }
expect(Split::ExperimentCatalog).to_not receive(:find_or_create)
end
| 5 | Fix remaining rubocop offenses | 5 | .rb | rb | mit | splitrb/split |
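These specs drive the dashboard through Rack::Test; in an application the same Sinatra app is mounted as a plain Rack endpoint. A minimal sketch; the `/split` path and the absence of authentication are illustrative choices, not part of this record:

    # config.ru (sketch): serve Split::Dashboard at /split.
    require "split/dashboard"

    # Guard this route (e.g. with Rack::Auth::Basic) before exposing it publicly.
    run Rack::URLMap.new("/split" => Split::Dashboard)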
10071103 | <NME> split.gemspec
<BEF> # -*- encoding: utf-8 -*-
# frozen_string_literal: true
$:.push File.expand_path("../lib", __FILE__)
require "split/version"
Gem::Specification.new do |s|
s.name = "split"
s.version = Split::VERSION
s.platform = Gem::Platform::RUBY
s.authors = ["Andrew Nesbitt"]
s.licenses = ["MIT"]
s.email = ["[email protected]"]
s.homepage = "https://github.com/splitrb/split"
s.summary = "Rack based split testing framework"
s.metadata = {
"homepage_uri" => "https://github.com/splitrb/split",
"changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md",
"source_code_uri" => "https://github.com/splitrb/split",
"bug_tracker_uri" => "https://github.com/splitrb/split/issues",
"mailing_list_uri" => "https://groups.google.com/d/forum/split-ruby"
}
s.required_ruby_version = '>= 1.9.3'
s.required_rubygems_version = '>= 2.0.0'
s.files = `git ls-files`.split("\n")
s.files = `git ls-files`.split("\n")
s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n")
s.require_paths = ["lib"]
s.add_dependency "redis", ">= 4.2"
s.add_dependency "sinatra", ">= 1.2.6"
s.add_dependency "rubystats", ">= 0.3.0"
s.add_development_dependency "bundler", ">= 1.17"
s.add_development_dependency "simplecov", "~> 0.15"
s.add_development_dependency "rack-test", "~> 2.0"
s.add_development_dependency "rake", "~> 13"
s.add_development_dependency "rspec", "~> 3.7"
s.add_development_dependency "pry", "~> 0.10"
s.add_development_dependency "rails", ">= 5.0"
end
<MSG> Drop support for ruby 1.9.3
<DFF> @@ -22,7 +22,7 @@ Gem::Specification.new do |s|
"mailing_list_uri" => "https://groups.google.com/d/forum/split-ruby"
}
- s.required_ruby_version = '>= 1.9.3'
+ s.required_ruby_version = '>= 2.2.2'
s.required_rubygems_version = '>= 2.0.0'
s.files = `git ls-files`.split("\n")
| 1 | Drop support for ruby 1.9.3 | 1 | .gemspec | gemspec | mit | splitrb/split |
10071106 | <NME> webpack.config.js
<BEF> var path = require('path');
var webpack = require('webpack');
var VERSION = require('./package.json').version;
var banner =
'/*!\n' +
' * Semantic-UI AngularJS integration\n' +
' * https://github.com/semantic-org/semantic-ui-angular\n' +
' * @license MIT\n' +
' * v' + VERSION + '\n' +
' */\n';
module.exports = {
context: path.resolve('src'),
devtool: "source-map",
entry: {
'semantic-ui-angular': './index',
'semantic-ui-angular.min': './index'
},
output: {
path: path.resolve('dist'),
filename: '[name].js'
},
plugins: [
new webpack.optimize.UglifyJsPlugin({
include: /\.min\.js$/,
minimize: true
}),
new webpack.BannerPlugin(banner, {raw: true})
],
module: {
loaders: [
{ test: /\.ts?$/, exclude: /node_modules/, loader: 'ts-loader' },
{ test: /\.json?$/, exclude: /node_modules/, loader: 'json-loader' }
]
},
resolve: {
extensions: ['', '.ts', '.js']
}
extensions: ['', '.ts', '.js']
}
};
<MSG> chore(project): Add tslint support
<DFF> @@ -29,11 +29,17 @@ module.exports = {
new webpack.BannerPlugin(banner, {raw: true})
],
module: {
+ preLoaders: [
+ { test: /\.ts$/, exclude: /node_modules/, loader: 'tslint' }
+ ],
loaders: [
{ test: /\.ts?$/, exclude: /node_modules/, loader: 'ts-loader' },
{ test: /\.json?$/, exclude: /node_modules/, loader: 'json-loader' }
]
},
+ tslint: {
+ configuration: require('./tslint.json')
+ },
resolve: {
extensions: ['', '.ts', '.js']
}
| 6 | chore(project): Add tslint support | 0 | .js | config | mit | Semantic-Org/Semantic-UI-Angular |
10071109 | <NME> README.md
<BEF> # [Split](https://libraries.io/rubygems/split)
[](http://badge.fury.io/rb/split)

[](https://codeclimate.com/github/splitrb/split)
[](https://codeclimate.com/github/splitrb/split/coverage)
[](https://github.com/RichardLitt/standard-readme)
[](https://www.codetriage.com/splitrb/split)
> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split
Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.
Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.
Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.
## Install
### Requirements
Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.
If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3).
Split uses Redis as a datastore.
Split only supports Redis 4.0 or greater.
If you're on OS X, Homebrew is the simplest way to install Redis:
```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```
You now have a Redis daemon running on port `6379`.
### Setup
```bash
gem install split
```
#### Rails
Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.
#### Sinatra
To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:
```ruby
require 'split'
class MySinatraApp < Sinatra::Base
enable :sessions
helpers Split::Helper
get '/' do
...
end
```
## Usage
To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.
`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.
It can be used to render different templates, show different text or any other case based logic.
`ab_finished` is used to mark the completion of an experiment, or conversion.
Example: View
```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
<%= image_tag(button_file, alt: "Login!") %>
<% end %>
```
Example: Controller
```ruby
def register_new_user
# See what level of free points maximizes users' decision to buy replacement points.
@starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```
Example: Conversion tracking (in a controller!)
```ruby
def buy_new_points
# some business logic
ab_finished(:new_user_free_points)
end
```
Example: Conversion tracking (in a view)
```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```
You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
## Statistical Validity
Split has two options for you to use to determine which alternative is the best.
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.
As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).
[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.
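For intuition, the statistic behind this test is the standard two-proportion z-test. A rough sketch follows; it is illustrative only, not Split's internal implementation, and the helper name and sample numbers are made up:
```ruby
# Two-proportion z-test (unpooled variance) -- illustrative sketch only.
def z_score(conversions_a, participants_a, conversions_b, participants_b)
  p_a = conversions_a.to_f / participants_a
  p_b = conversions_b.to_f / participants_b
  variance = p_a * (1 - p_a) / participants_a + p_b * (1 - p_b) / participants_b
  (p_a - p_b) / Math.sqrt(variance)
end

z_score(130, 1000, 100, 1000) # => ~2.1, i.e. roughly 95% significant
```
A |z| above 1.96 corresponds to the 95% level Split reports.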
The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.
Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).
```ruby
Split.configure do |config|
config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```
## Extras
### Weighted alternatives
Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.
To do this you can pass a weight with each alternative in the following ways:
```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
ab_test(:homepage_design, {'Old' => 9}, 'New')
```
This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.
### Overriding alternatives
For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the url.
If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:
http://myawesomesite.com?ab_test[button_color]=red
will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.
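To have overrides persisted and counted, enable that option; a minimal sketch:
```ruby
Split.configure do |config|
  config.store_override = true
end
```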
In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.
http://myawesomesite.com?SPLIT_DISABLE=true
It is not required to send `SPLIT_DISABLE=false` to activate Split.
### Rspec Helper
To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:
```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
# Force a specific experiment alternative to always be returned:
# use_ab_test(signup_form: "single_page")
#
# Force alternatives for multiple experiments:
# use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
#
def use_ab_test(alternatives_by_experiment)
allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
block.call(variant) unless block.nil?
variant
end
end
end
# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
config.include SplitHelper
end
```
Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:
```ruby
it "registers using experimental signup" do
use_ab_test experiment_name: "alternative_name"
post "/signups"
...
end
```
### Starting experiments manually
By default new A/B tests will be active right after deployment. In case you would like to start a new test a while after
the deploy, you can do it by setting the `start_manually` configuration option to `true`.
After choosing this option tests won't be started right after deploy, but only after pressing the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, it can only be started again by pressing the `Start` button once it has been re-initialized.
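For example:
```ruby
Split.configure do |config|
  config.start_manually = true
end
```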
### Reset after completion
When a user completes a test their session is reset so that they may start the test again in the future.
To stop this behaviour you can pass the following option to the `ab_finished` method:
```ruby
ab_finished(:experiment_name, reset: false)
```
The user will then always see the alternative they started with.
Any old unfinished experiment key will be deleted from the user's data storage if the experiment has been removed, or if it is over and a winner has been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.
### Reset experiments manually
By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.
You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.
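For example:
```ruby
Split.configure do |config|
  config.reset_manually = true
end
```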
### Multiple experiments at once
By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.
To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = true
end
```
This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.
To address this, setting the `allow_multiple_experiments` config option to 'control' like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = 'control'
end
```
For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment.
### Experiment Persistence
Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.
By default Split will store the tests for each user in the session.
You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.
#### Cookies
```ruby
Split.configure do |config|
config.persistence = :cookie
end
```
When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).
```ruby
Split.configure do |config|
config.persistence = :cookie
config.persistence_cookie_length = 2592000 # 30 days
end
```
The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" }
__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API
#### Redis
Using Redis will allow ab_users to persist across sessions or machines.
```ruby
Split.configure do |config|
config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
# Equivalent
# config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```
Options (combined in the sketch below):
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)
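A sketch combining the options above (the TTL shown is an arbitrary example value):
```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(
    lookup_by: :current_user_id,
    namespace: "ab_users",
    expire_seconds: 2_592_000 # 30 days, example value
  )
end
```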
#### Dual Adapter
The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.
```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
lookup_by: -> (context) { context.send(:current_user).try(:id) },
expire_seconds: 2592000)
Split.configure do |config|
config.persistence = Split::Persistence::DualAdapter.with_config(
logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
logged_in_adapter: redis_adapter,
logged_out_adapter: cookie_adapter)
config.persistence_cookie_length = 2592000 # 30 days
end
```
#### Custom Adapter
Your custom adapter needs to implement the same API as existing adapters.
See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.
```ruby
Split.configure do |config|
config.persistence = YourCustomAdapterClass
end
```
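As a rough skeleton of that API (method names inferred from the built-in adapters; check them against the Split version you use):
```ruby
class YourCustomAdapterClass
  def initialize(context)
    @context = context # e.g. the controller or rack request handling the user
  end

  def [](key)
    # return the stored alternative name for this experiment key, or nil
  end

  def []=(key, value)
    # persist the chosen alternative for this experiment key
  end

  def delete(key)
    # forget this experiment key for the user
  end

  def keys
    # list all experiment keys stored for this user
    []
  end
end
```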
### Trial Event Hooks
You can define methods that will be called at the same time as experiment
alternative participation and goal completion.
For example:
``` ruby
Split.configure do |config|
config.on_trial = :log_trial # run on every trial
config.on_trial_choose = :log_trial_choose # run on trials with new users only
config.on_trial_complete = :log_trial_complete
end
```
Set these attributes to a method name available in the same context as the
`ab_test` method. These methods should accept one argument, a `Trial` instance.
``` ruby
def log_trial(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_choose(trial)
logger.info "[new user] experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_complete(trial)
logger.info "experiment=%s alternative=%s user=%s complete=true" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
#### Views
If you are running `ab_test` from a view, you must define your event
hook callback as a
[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)
in the controller:
``` ruby
helper_method :log_trial_choose
def log_trial_choose(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
### Experiment Hooks
You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.
For example:
``` ruby
Split.configure do |config|
# after experiment reset or deleted
config.on_experiment_reset = -> (example) { # Do something on reset }
config.on_experiment_delete = -> (experiment) { # Do something else on delete }
# before experiment reset or deleted
config.on_before_experiment_reset = -> (example) { # Do something on reset }
config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }
# after experiment winner had been set
config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }
end
```
## Web Interface
Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.
If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`
```ruby
require 'split/dashboard'
run Rack::URLMap.new \
"/" => Your::App.new,
"/split" => Split::Dashboard.new
```
However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:
```ruby
gem 'split', require: 'split/dashboard'
```
Then adding this to config/routes.rb
```ruby
mount Split::Dashboard, at: 'split'
```
You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)
```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:
```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
request.env['warden'].authenticated? # are we authenticated?
request.env['warden'].authenticate! # authenticate if not already
# or even check any other condition such as request.env['warden'].user.is_admin?
end
```
## Configuration
You can override the default configuration options of Split like so:
```ruby
Split.configure do |config|
  config.db_failover = true # handle Redis errors gracefully
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
  config.allow_multiple_experiments = true
  config.persistence = Split::Persistence::SessionAdapter
  config.start_manually = false
  config.reset_manually = false
  config.redis = "redis://custom.redis.url:6380"
end
```
Split looks for the Redis host in the environment variable `REDIS_URL`, then
defaults to `redis://localhost:6379` if not specified by configure block.
On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to
determine which env variable key to use when retrieving the host config. This
defaults to `REDIS_URL`.
### Filtering
In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.
Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.
```ruby
Split.configure do |config|
# bot config
config.robot_regex = /my_custom_robot_regex/ # or
config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"
# IP config
config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/
# or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
### Experiment configuration
Instead of providing the experiment options inline, you can store them
in a hash. This hash can control your experiment's alternatives, weights,
algorithm and if the experiment resets once finished:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
resettable: false
},
:my_second_experiment => {
algorithm: 'Split::Algorithms::Whiplash',
alternatives: [
{ name: "a", percent: 67 },
{ name: "b", percent: 33 }
]
}
}
end
```
You can also store your experiments in a YAML file:
```ruby
Split.configure do |config|
config.experiments = YAML.load_file "config/experiments.yml"
end
```
You can then define the YAML file like:
```yaml
my_first_experiment:
alternatives:
- a
- b
my_second_experiment:
alternatives:
- name: a
percent: 67
- name: b
percent: 33
resettable: false
```
This simplifies the calls from your code:
```ruby
ab_test(:my_first_experiment)
```
and:
```ruby
ab_finished(:my_first_experiment)
```
You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metadata: {
"a" => {"text" => "Have a fantastic day"},
"b" => {"text" => "Don't get hit by a bus"}
}
}
}
end
```
```yaml
my_first_experiment:
alternatives:
- a
- b
metadata:
a:
text: "Have a fantastic day"
b:
text: "Don't get hit by a bus"
```
This allows for some advanced experiment configuration using methods like:
```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```
or in views:
```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
<%= alternative %>
<small><%= meta['text'] %></small>
<% end %>
```
The keys used in meta data should be Strings
#### Metrics
You might wish to track generic metrics, such as conversions, and use
those to complete multiple different experiments without adding more to
your code. You can use the configuration hash to do this, thanks to
the `:metric` option.
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metric: :my_metric
}
}
end
```
Your code may then track a completion using the metric instead of
the experiment name:
```ruby
ab_finished(:my_metric)
```
You can also create a new metric by instantiating and saving a new Metric object.
```ruby
metric = Split::Metric.new(name: :my_metric)
metric.save
```
#### Goals
You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:
```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```
or you can define them in a configuration file:
```ruby
Split.configure do |config|
config.experiments = {
link_color: {
alternatives: ["red", "blue"],
goals: ["purchase", "refund"]
}
}
end
```
To complete a goal conversion, you do it like:
```ruby
ab_finished(link_color: "purchase")
```
Note that if you pass additional options, that should be a separate hash:
```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```
**NOTE:** This does not mean that a single experiment can complete more than one goal.
Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)
**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").
**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.
**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.
#### Combined Experiments
If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.
Configure like so:
```ruby
Split.configuration.experiments = {
:button_color_experiment => {
:alternatives => ["blue", "green"],
:combined_experiments => ["button_color_on_signup", "button_color_on_login"]
}
}
```
Starting the combined test starts all combined experiments
```ruby
ab_combined_test(:button_color_experiment)
```
Finish each combined test as normal
```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```
**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`
* In Sinatra include the CombinedExperimentsHelper
```ruby
helpers Split::CombinedExperimentsHelper
```
### DB failover solution
Due to the fact that Redis has no automatic failover mechanism, it's
possible to switch on the `db_failover` config option, so that `ab_test`
and `ab_finished` will not crash in case of a db failure. `ab_test` always
delivers alternative A (the first one) in that case.
It's also possible to set a `db_failover_on_db_error` callback (proc)
for example to log these errors via Rails.logger.
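A minimal sketch wiring both options together:
```ruby
Split.configure do |config|
  config.db_failover = true
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
end
```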
### Redis
You may want to change the Redis host and port Split connects to, or
set various other options at startup.
Split has a `redis` setter which can be given a string or a Redis
object. This means if you're already using Redis in your app, Split
can re-use the existing connection.
String: `Split.redis = 'redis://localhost:6379'`
Redis: `Split.redis = $redis`
For our rails app we have a `config/initializers/split.rb` file where
we load `config/split.yml` by hand and set the Redis information
appropriately.
Here's our `config/split.yml`:
```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```
And our initializer:
```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```
### Redis Caching (v4.0+)
In some high-volume usage scenarios, Redis load can be incurred by repeated
fetches for fairly static data. Enabling caching will reduce this load.
```ruby
Split.configuration.cache = true
```
This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`
## Namespaces
If you're running multiple, separate instances of Split you may want
to namespace the keyspaces so they do not overlap. This is not unlike
the approach taken by many memcached clients.
This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)
library. To configure Split to use `Redis::Namespace`, do the following:
1. Add `redis-namespace` to your Gemfile:
```ruby
gem 'redis-namespace'
```
2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an
initializer):
```ruby
redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
```
## Outside of a Web Session
Split provides the Helper module to facilitate running experiments inside web sessions.
Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to
conduct experiments that are not tied to a web session.
```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(:experiment => experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
trial.complete!
end
```
## Algorithms
By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.
It is possible to specify static weights to favor certain alternatives.
`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).
This algorithm will automatically weight the alternatives based on their relative performance,
choosing the better-performing ones more often as trials are completed.
`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal
participation across all alternatives. This algorithm will choose the alternative
with the fewest participants. In the event of multiple minimum participant alternatives
(i.e. starting a new "Block") the algorithm will choose a random alternative from
those minimum participant alternatives.
Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.
To change the algorithm globally for all experiments, use the following in your initializer:
```ruby
Split.configure do |config|
config.algorithm = Split::Algorithms::Whiplash
end
```
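To set the algorithm for a single experiment instead, use the experiments hash shown earlier:
```ruby
Split.configure do |config|
  config.experiments = {
    my_experiment: {
      alternatives: ["a", "b"],
      algorithm: 'Split::Algorithms::BlockRandomization'
    }
  }
end
```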
## Extensions
- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.
## Screencast
Ryan Bates has produced an excellent 10 minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)
## Blogposts
* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)
## Backers
Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]
<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a>
## Sponsors
Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]
<a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a>
## Contribute
Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.
### Development
The source code is hosted at [GitHub](https://github.com/splitrb/split).
Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).
You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).
### Tests
Run the tests like this:
```bash
# Start a Redis server in another tab.
redis-server
bundle
rake spec
```
### A Note on Patches and Pull Requests
* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a
future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history.
(If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.
### Code of Conduct
Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
## Copyright
[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
<MSG> Merge pull request #297 from ekorneeff/master
Update README section about metadata
<DFF> @@ -455,8 +455,8 @@ my_first_experiment:
meta:
a:
text: "Have a fantastic day"
- b:
- text: "Don't get hit by a bus"
+ b:
+ text: "Don't get hit by a bus"
```
This allows for some advanced experiment configuration using methods like:
@@ -467,6 +467,15 @@ trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```
+or in views:
+
+```erb
+<% ab_test("my_first_experiment") do |alternative, meta| %>
+ <%= alternative %>
+ <small><%= meta['text'] %></small>
+<% end %>
+```
+
#### Metrics
You might wish to track generic metrics, such as conversions, and use
| 11 | Merge pull request #297 from ekorneeff/master | 2 | .md | md | mit | splitrb/split |
10071110 | <NME> README.md
<BEF> # [Split](https://libraries.io/rubygems/split)
[](http://badge.fury.io/rb/split)

[](https://codeclimate.com/github/splitrb/split)
[](https://codeclimate.com/github/splitrb/split/coverage)
[](https://github.com/RichardLitt/standard-readme)
[](https://www.codetriage.com/splitrb/split)
> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split
Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.
Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.
Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.
## Install
### Requirements
Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.
If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3).
Split uses Redis as a datastore.
Split only supports Redis 4.0 or greater.
If you're on OS X, Homebrew is the simplest way to install Redis:
```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```
You now have a Redis daemon running on port `6379`.
### Setup
```bash
gem install split
```
#### Rails
Adding `gem 'split'` to your Gemfile will autoload it when Rails starts up. As long as you've configured Redis, it will 'just work'.
#### Sinatra
To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:
```ruby
require 'split'
class MySinatraApp < Sinatra::Base
enable :sessions
helpers Split::Helper
  get '/' do
    ...
  end
end
```
## Usage
To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.
`ab_test` returns one of the alternatives; if a user has already seen that test, they will get the same alternative as before, which you can use to split your code on.
It can be used to render different templates, show different text, or for any other case-based logic.
`ab_finished` is used to mark the completion of an experiment, or a conversion.
Example: View
```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
<%= image_tag(button_file, alt: "Login!") %>
<% end %>
```
Example: Controller
```ruby
def register_new_user
# See what level of free points maximizes users' decision to buy replacement points.
@starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```
Example: Conversion tracking (in a controller!)
```ruby
def buy_new_points
# some business logic
ab_finished(:new_user_free_points)
end
```
Example: Conversion tracking (in a view)
```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```
You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
## Statistical Validity
Split has two options for you to use to determine which alternative is the best.
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.
As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).
[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.
The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.
Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).
```ruby
Split.configure do |config|
config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```
## Extras
### Weighted alternatives
Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.
To do this you can pass a weight with each alternative in the following ways:
```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
ab_test(:homepage_design, {'Old' => 9}, 'New')
```
This will only show the new alternative to visitors 1 in 10 times; the default weight for an alternative is 1.
### Overriding alternatives
For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the url.
If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:
http://myawesomesite.com?ab_test[button_color]=red
will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.
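To also persist and count overridden alternatives, enable the option named above in an initializer, for example:
```ruby
Split.configure do |config|
  # Store the alternative forced via ?ab_test[...]=... in the user's
  # session and count it towards the results.
  config.store_override = true
end
```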
In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.
http://myawesomesite.com?SPLIT_DISABLE=true
It is not required to send `SPLIT_DISABLE=false` to activate Split.
### Rspec Helper
To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:
```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
# Force a specific experiment alternative to always be returned:
# use_ab_test(signup_form: "single_page")
#
# Force alternatives for multiple experiments:
# use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
#
def use_ab_test(alternatives_by_experiment)
allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
block.call(variant) unless block.nil?
variant
end
end
end
# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
config.include SplitHelper
end
```
Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:
```ruby
it "registers using experimental signup" do
use_ab_test experiment_name: "alternative_name"
post "/signups"
...
end
```
### Starting experiments manually
By default new A/B tests will be active right after deployment. In case you would like to start a new test a while after
the deploy, you can do it by setting the `start_manually` configuration option to `true`.
After choosing this option, tests won't be started right after deploy, but only after pressing the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, it can only be started again by pressing the `Start` button once it has been re-initialized.
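For example, in an initializer:
```ruby
Split.configure do |config|
  # New experiments stay inactive until started from the dashboard.
  config.start_manually = true
end
```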
### Reset after completion
When a user completes a test their session is reset so that they may start the test again in the future.
To stop this behaviour you can pass the following option to the `ab_finished` method:
```ruby
ab_finished(:experiment_name, reset: false)
```
The user will then always see the alternative they started with.
Any old unfinished experiment key will be deleted from the user's data storage if the experiment has been removed, or is over and a winner has been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.
### Reset experiments manually
By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.
You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.
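For example:
```ruby
Split.configure do |config|
  # Keep existing experiment data when an experiment's configuration changes.
  config.reset_manually = true
end
```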
### Multiple experiments at once
By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.
To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = true
end
```
This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.
To address this, set the `allow_multiple_experiments` config option to 'control' like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = 'control'
end
```
For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment.
### Experiment Persistence
Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.
By default Split will store the tests for each user in the session.
You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.
#### Cookies
```ruby
Split.configure do |config|
config.persistence = :cookie
end
```
When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).
```ruby
Split.configure do |config|
config.persistence = :cookie
config.persistence_cookie_length = 2592000 # 30 days
end
```
The data stored consists of the experiment name and the variants the user is in. Example: `{ "experiment_name" => "variant_a" }`
__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API.
#### Redis
Using Redis will allow ab_users to persist across sessions or machines.
```ruby
Split.configure do |config|
config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
# Equivalent
# config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```
Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)
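Combining these options might look like this (the values shown are illustrative):
```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(
    lookup_by: :current_user_id,  # mandatory
    namespace: "split_users",     # illustrative custom namespace
    expire_seconds: 2_592_000     # 30 days
  )
end
```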
#### Dual Adapter
The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.
```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
lookup_by: -> (context) { context.send(:current_user).try(:id) },
expire_seconds: 2592000)
Split.configure do |config|
config.persistence = Split::Persistence::DualAdapter.with_config(
logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
logged_in_adapter: redis_adapter,
logged_out_adapter: cookie_adapter)
config.persistence_cookie_length = 2592000 # 30 days
end
```
#### Custom Adapter
Your custom adapter needs to implement the same API as existing adapters.
See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.
```ruby
Split.configure do |config|
config.persistence = YourCustomAdapterClass
end
```
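As a rough sketch, an adapter is instantiated with the context `ab_test` runs in and exposes a small hash-like API. The method names below mirror the built-in `SessionAdapter`; verify them against the adapters referenced above:
```ruby
class YourCustomAdapterClass
  def initialize(context)
    # `context` is the object the helper runs in (e.g. your controller).
    @store = {} # swap in your own backing store
  end

  def [](key)
    @store[key.to_s]
  end

  def []=(key, value)
    @store[key.to_s] = value
  end

  def delete(key)
    @store.delete(key.to_s)
  end

  def keys
    @store.keys
  end
end
```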
### Trial Event Hooks
You can define methods that will be called at the same time as experiment
alternative participation and goal completion.
For example:
``` ruby
Split.configure do |config|
config.on_trial = :log_trial # run on every trial
config.on_trial_choose = :log_trial_choose # run on trials with new users only
config.on_trial_complete = :log_trial_complete
end
```
Set these attributes to a method name available in the same context as the
`ab_test` method. These methods should accept one argument, a `Trial` instance.
``` ruby
def log_trial(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_choose(trial)
logger.info "[new user] experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_complete(trial)
logger.info "experiment=%s alternative=%s user=%s complete=true" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
#### Views
If you are running `ab_test` from a view, you must define your event
hook callback as a
[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)
in the controller:
``` ruby
helper_method :log_trial_choose
def log_trial_choose(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
### Experiment Hooks
You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.
For example:
``` ruby
Split.configure do |config|
# after experiment reset or deleted
config.on_experiment_reset = -> (example) { # Do something on reset }
config.on_experiment_delete = -> (experiment) { # Do something else on delete }
# before experiment reset or deleted
config.on_before_experiment_reset = -> (example) { # Do something on reset }
config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }
# after experiment winner had been set
config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }
end
```
## Web Interface
Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.
If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`
```ruby
require 'split/dashboard'
run Rack::URLMap.new \
"/" => Your::App.new,
"/split" => Split::Dashboard.new
```
However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:
```ruby
gem 'split', require: 'split/dashboard'
```
Then add this to `config/routes.rb`:
```ruby
mount Split::Dashboard, at: 'split'
```
You may want to password protect that page; you can do so with `Rack::Auth::Basic` (in your split initializer file):
```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:
```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
request.env['warden'].authenticated? # are we authenticated?
request.env['warden'].authenticate! # authenticate if not already
# or even check any other condition such as request.env['warden'].user.is_admin?
end
```
## Configuration
You can override the default configuration options of Split in a configure block; a minimal example is sketched below.
Split looks for the Redis host in the environment variable `REDIS_URL`, then defaults to `redis://localhost:6379` if not specified by a configure block.
On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`.
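For example, a minimal configure block (every option shown here is documented elsewhere in this README; treat it as a sketch, not an exhaustive list):
```ruby
Split.configure do |config|
  config.allow_multiple_experiments = true
  config.db_failover = true # see "DB failover solution" below
  config.cache = true       # Redis caching, v4.0+
  config.redis = ENV.fetch("REDIS_URL", "redis://localhost:6379")
end
```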
### Filtering
In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.
Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.
```ruby
Split.configure do |config|
# bot config
config.robot_regex = /my_custom_robot_regex/ # or
config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"
# IP config
config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/
# or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
### Experiment configuration
Instead of providing the experiment options inline, you can store them
in a hash. This hash can control your experiment's alternatives, weights,
algorithm and whether the experiment resets once finished:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
resettable: false
},
:my_second_experiment => {
algorithm: 'Split::Algorithms::Whiplash',
alternatives: [
{ name: "a", percent: 67 },
{ name: "b", percent: 33 }
]
}
}
end
```
You can also store your experiments in a YAML file:
```ruby
Split.configure do |config|
config.experiments = YAML.load_file "config/experiments.yml"
end
```
You can then define the YAML file like:
```yaml
my_first_experiment:
alternatives:
- a
- b
my_second_experiment:
alternatives:
- name: a
percent: 67
- name: b
percent: 33
resettable: false
```
This simplifies the calls from your code:
```ruby
ab_test(:my_first_experiment)
```
and:
```ruby
ab_finished(:my_first_experiment)
```
You can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metadata: {
"a" => {"text" => "Have a fantastic day"},
"b" => {"text" => "Don't get hit by a bus"}
}
}
}
end
```
```yaml
my_first_experiment:
alternatives:
- a
- b
metadata:
a:
text: "Have a fantastic day"
b:
text: "Don't get hit by a bus"
```
This allows for some advanced experiment configuration using methods like:
```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```
or in views:
```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
<%= alternative %>
<small><%= meta['text'] %></small>
<% end %>
```
The keys used in metadata should be Strings.
#### Metrics
You might wish to track generic metrics, such as conversions, and use
those to complete multiple different experiments without adding more to
your code. You can use the configuration hash to do this, thanks to
the `:metric` option.
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metric: :my_metric
}
}
end
```
Your code may then track a completion using the metric instead of
the experiment name:
```ruby
ab_finished(:my_metric)
```
You can also create a new metric by instantiating and saving a new Metric object.
```ruby
metric = Split::Metric.new(:my_metric)
metric.save
```
#### Goals
You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:
```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```
or you can define them in a configuration file:
```ruby
Split.configure do |config|
config.experiments = {
link_color: {
alternatives: ["red", "blue"],
goals: ["purchase", "refund"]
}
}
end
```
To complete a goal conversion, you do it like:
```ruby
ab_finished(link_color: "purchase")
```
Note that if you pass additional options, that should be a separate hash:
```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```
**NOTE:** This does not mean that a single experiment can complete more than one goal.
Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)
**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").
**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.
**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.
#### Combined Experiments
If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.
Configure like so:
```ruby
Split.configuration.experiments = {
:button_color_experiment => {
:alternatives => ["blue", "green"],
:combined_experiments => ["button_color_on_signup", "button_color_on_login"]
}
}
```
Starting the combined test starts all combined experiments:
```ruby
ab_combined_test(:button_color_experiment)
```
Finish each combined test as normal:
```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```
**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`
* In Sinatra include the CombinedExperimentsHelper
```ruby
helpers Split::CombinedExperimentsHelper
```
### DB failover solution
Because Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a DB failure. `ab_test` always delivers alternative A (the first one) in that case.
It's also possible to set a `db_failover_on_db_error` callback (proc), for example to log these errors via Rails.logger.
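Together, that might look like:
```ruby
Split.configure do |config|
  config.db_failover = true
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
end
```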
### Redis
You may want to change the Redis host and port Split connects to, or
set various other options at startup.
Split has a `redis` setter which can be given a string or a Redis
object. This means if you're already using Redis in your app, Split
can re-use the existing connection.
String: `Split.redis = 'redis://localhost:6379'`
Redis: `Split.redis = $redis`
For our Rails app we have a `config/initializers/split.rb` file where
we load `config/split.yml` by hand and set the Redis information
appropriately.
Here's our `config/split.yml`:
```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```
And our initializer:
```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```
### Redis Caching (v4.0+)
In some high-volume usage scenarios, Redis load can be incurred by repeated
fetches for fairly static data. Enabling caching will reduce this load.
```ruby
Split.configuration.cache = true
```
This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`
## Namespaces
If you're running multiple, separate instances of Split you may want
to namespace the keyspaces so they do not overlap. This is not unlike
the approach taken by many memcached clients.
This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)
library. To configure Split to use `Redis::Namespace`, do the following:
1. Add `redis-namespace` to your Gemfile:
```ruby
gem 'redis-namespace'
```
2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an
initializer):
```ruby
redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
```
## Outside of a Web Session
Split provides the Helper module to facilitate running experiments inside web sessions.
Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to
conduct experiments that are not tied to a web session.
```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(:experiment => experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
trial.complete!
end
```
## Algorithms
By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.
It is possible to specify static weights to favor certain alternatives.
`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).
This algorithm will automatically weight the alternatives based on their relative performance,
choosing the better-performing ones more often as trials are completed.
`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal
participation across all alternatives. This algorithm will choose the alternative
with the fewest participants. In the event of multiple minimum participant alternatives
(i.e. starting a new "Block") the algorithm will choose a random alternative from
those minimum participant alternatives.
Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.
To change the algorithm globally for all experiments, use the following in your initializer:
```ruby
Split.configure do |config|
config.algorithm = Split::Algorithms::Whiplash
end
```
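To set an algorithm for a single experiment instead, use the experiments hash in the same format shown under "Experiment configuration" above:
```ruby
Split.configure do |config|
  config.experiments = {
    my_experiment: {
      alternatives: ["a", "b"],
      algorithm: 'Split::Algorithms::BlockRandomization'
    }
  }
end
```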
## Extensions
- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.
## Screencast
Ryan Bates has produced an excellent 10 minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)
## Blogposts
* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)
## Backers
Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]
<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a>
## Sponsors
Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]
<a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a>
## Contribute
Please do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.
### Development
The source code is hosted at [GitHub](https://github.com/splitrb/split).
Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).
You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).
### Tests
Run the tests like this:
# Start a Redis server in another tab.
redis-server
bundle
rake spec
### A Note on Patches and Pull Requests
* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a
future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history.
(If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.
### Code of Conduct
Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
## Copyright
[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
<MSG> Merge pull request #297 from ekorneeff/master
Update README section about metadata
<DFF> @@ -455,8 +455,8 @@ my_first_experiment:
meta:
a:
text: "Have a fantastic day"
- b:
- text: "Don't get hit by a bus"
+ b:
+ text: "Don't get hit by a bus"
```
This allows for some advanced experiment configuration using methods like:
@@ -467,6 +467,15 @@ trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```
+or in views:
+
+```erb
+<% ab_test("my_first_experiment") do |alternative, meta| %>
+ <%= alternative %>
+ <small><%= meta['text'] %></small>
+<% end %>
+```
+
#### Metrics
You might wish to track generic metrics, such as conversions, and use
| 11 | Merge pull request #297 from ekorneeff/master | 2 | .md | md | mit | splitrb/split |
10071111 | <NME> README.md
<BEF> # [Split](https://libraries.io/rubygems/split)
[](http://badge.fury.io/rb/split)

[](https://codeclimate.com/github/splitrb/split)
[](https://codeclimate.com/github/splitrb/split/coverage)
[](https://github.com/RichardLitt/standard-readme)
[](https://www.codetriage.com/splitrb/split)
> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split
Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.
Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.
Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.
## Install
### Requirements
Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.
If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)
Split uses Redis as a datastore.
Split only supports Redis 4.0 or greater.
If you're on OS X, Homebrew is the simplest way to install Redis:
```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```
You now have a Redis daemon running on port `6379`.
### Setup
```bash
gem install split
```
#### Rails
Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.
#### Sinatra
To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:
```ruby
require 'split'
class MySinatraApp < Sinatra::Base
enable :sessions
helpers Split::Helper
get '/' do
...
end
```
## Usage
To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.
`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.
It can be used to render different templates, show different text or any other case based logic.
`ab_finished` is used to make a completion of an experiment, or conversion.
Example: View
```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
<%= image_tag(button_file, alt: "Login!") %>
<% end %>
```
Example: Controller
```ruby
def register_new_user
# See what level of free points maximizes users' decision to buy replacement points.
@starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```
Example: Conversion tracking (in a controller!)
```ruby
def buy_new_points
# some business logic
ab_finished(:new_user_free_points)
end
```
Example: Conversion tracking (in a view)
```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```
You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
## Statistical Validity
Split has two options for you to use to determine which alternative is the best.
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.
As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).
[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.
The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.
Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).
```ruby
Split.configure do |config|
config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```
## Extras
### Weighted alternatives
Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.
To do this you can pass a weight with each alternative in the following ways:
```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
ab_test(:homepage_design, {'Old' => 9}, 'New')
```
This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.
### Overriding alternatives
For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the url.
If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:
http://myawesomesite.com?ab_test[button_color]=red
will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.
In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.
http://myawesomesite.com?SPLIT_DISABLE=true
It is not required to send `SPLIT_DISABLE=false` to activate Split.
### Rspec Helper
To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:
```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
# Force a specific experiment alternative to always be returned:
# use_ab_test(signup_form: "single_page")
#
# Force alternatives for multiple experiments:
# use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
#
def use_ab_test(alternatives_by_experiment)
allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
block.call(variant) unless block.nil?
variant
end
end
end
# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
config.include SplitHelper
end
```
Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:
```ruby
it "registers using experimental signup" do
use_ab_test experiment_name: "alternative_name"
post "/signups"
...
end
```
### Starting experiments manually
By default new A/B tests will be active right after deployment. In case you would like to start new test a while after
the deploy, you can do it by setting the `start_manually` configuration option to `true`.
After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.
### Reset after completion
When a user completes a test their session is reset so that they may start the test again in the future.
To stop this behaviour you can pass the following option to the `ab_finished` method:
```ruby
ab_finished(:experiment_name, reset: false)
```
The user will then always see the alternative they started with.
Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.
### Reset experiments manually
By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.
You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.
### Multiple experiments at once
By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.
To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = true
end
```
This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.
To address this, setting the `allow_multiple_experiments` config option to 'control' like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = 'control'
end
```
For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment.
### Experiment Persistence
Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.
By default Split will store the tests for each user in the session.
You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.
#### Cookies
```ruby
Split.configure do |config|
config.persistence = :cookie
end
```
When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).
```ruby
Split.configure do |config|
config.persistence = :cookie
config.persistence_cookie_length = 2592000 # 30 days
end
```
The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" }
__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API
#### Redis
Using Redis will allow ab_users to persist across sessions or machines.
```ruby
Split.configure do |config|
config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
# Equivalent
# config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```
Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)
#### Dual Adapter
The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.
```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
lookup_by: -> (context) { context.send(:current_user).try(:id) },
expire_seconds: 2592000)
Split.configure do |config|
config.persistence = Split::Persistence::DualAdapter.with_config(
logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
logged_in_adapter: redis_adapter,
logged_out_adapter: cookie_adapter)
config.persistence_cookie_length = 2592000 # 30 days
end
```
#### Custom Adapter
Your custom adapter needs to implement the same API as existing adapters.
See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.
```ruby
Split.configure do |config|
config.persistence = YourCustomAdapterClass
end
```
### Trial Event Hooks
You can define methods that will be called at the same time as experiment
alternative participation and goal completion.
For example:
``` ruby
Split.configure do |config|
config.on_trial = :log_trial # run on every trial
config.on_trial_choose = :log_trial_choose # run on trials with new users only
config.on_trial_complete = :log_trial_complete
end
```
Set these attributes to a method name available in the same context as the
`ab_test` method. These methods should accept one argument, a `Trial` instance.
``` ruby
def log_trial(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_choose(trial)
logger.info "[new user] experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_complete(trial)
logger.info "experiment=%s alternative=%s user=%s complete=true" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
#### Views
If you are running `ab_test` from a view, you must define your event
hook callback as a
[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)
in the controller:
``` ruby
helper_method :log_trial_choose
def log_trial_choose(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
### Experiment Hooks
You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.
For example:
``` ruby
Split.configure do |config|
# after experiment reset or deleted
config.on_experiment_reset = -> (example) { # Do something on reset }
config.on_experiment_delete = -> (experiment) { # Do something else on delete }
# before experiment reset or deleted
config.on_before_experiment_reset = -> (example) { # Do something on reset }
config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }
# after experiment winner had been set
config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }
end
```
## Web Interface
Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.
If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`
```ruby
require 'split/dashboard'
run Rack::URLMap.new \
"/" => Your::App.new,
"/split" => Split::Dashboard.new
```
However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:
```ruby
gem 'split', require: 'split/dashboard'
```
Then adding this to config/routes.rb
```ruby
mount Split::Dashboard, at: 'split'
```
You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)
```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, at: 'split'` in `config/routes.rb` with the following:
```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
request.env['warden'].authenticated? # are we authenticated?
request.env['warden'].authenticate! # authenticate if not already
# or even check any other condition such as request.env['warden'].user.is_admin?
end
```
## Configuration
You can override the default configuration options of Split like so:
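A representative example (this is a subset of the available options, shown in a Rails initializer):
```ruby
Split.configure do |config|
  config.db_failover = true # handle Redis errors gracefully
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
  config.allow_multiple_experiments = true
  config.enabled = true
  config.persistence = Split::Persistence::SessionAdapter
  config.include_rails_helper = true
  config.redis = "redis://custom.redis.url:6380"
end
```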
Split looks for the Redis host in the environment variable `REDIS_URL`, then
defaults to `redis://localhost:6379` if not specified by the configure block.
On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to
determine which env variable key to use when retrieving the host config. This
defaults to `REDIS_URL`.
### Filtering
In most scenarios you don't want to have A/B testing enabled for web spiders, robots, or special groups of users.
Split provides functionality to filter these out based on a predefined, extensible list of bots, IP lists, or custom exclude logic.
```ruby
Split.configure do |config|
# bot config
config.robot_regex = /my_custom_robot_regex/ # or
config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"
# IP config
config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/
# or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
### Experiment configuration
Instead of providing the experiment options inline, you can store them
in a hash. This hash can control your experiment's alternatives, weights,
algorithm, and whether the experiment resets once finished:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
resettable: false
},
    my_second_experiment: {
algorithm: 'Split::Algorithms::Whiplash',
alternatives: [
{ name: "a", percent: 67 },
{ name: "b", percent: 33 }
]
}
}
end
```
You can also store your experiments in a YAML file:
```ruby
Split.configure do |config|
config.experiments = YAML.load_file "config/experiments.yml"
end
```
You can then define the YAML file like:
```yaml
my_first_experiment:
alternatives:
- a
- b
my_second_experiment:
alternatives:
- name: a
percent: 67
- name: b
percent: 33
resettable: false
```
This simplifies the calls from your code:
```ruby
ab_test(:my_first_experiment)
```
and:
```ruby
ab_finished(:my_first_experiment)
```
You can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metadata: {
"a" => {"text" => "Have a fantastic day"},
"b" => {"text" => "Don't get hit by a bus"}
}
}
}
end
```
or in YAML:
```yaml
my_first_experiment:
alternatives:
- a
- b
metadata:
a:
text: "Have a fantastic day"
b:
text: "Don't get hit by a bus"
```
This allows for some advanced experiment configuration using methods like:
```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```
or in views:
```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
<%= alternative %>
<small><%= meta['text'] %></small>
<% end %>
```
The keys used in metadata should be Strings.
#### Metrics
You might wish to track generic metrics, such as conversions, and use
those to complete multiple different experiments without adding more to
your code. You can use the configuration hash to do this, thanks to
the `:metric` option.
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metric: :my_metric
}
}
end
```
Your code may then track a completion using the metric instead of
the experiment name:
```ruby
ab_finished(:my_metric)
```
You can also create a new metric by instantiating and saving a new Metric object.
```ruby
metric = Split::Metric.new(:my_metric)
metric.save
```
#### Goals
You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:
```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```
or you can define them in a configuration file:
```ruby
Split.configure do |config|
config.experiments = {
link_color: {
alternatives: ["red", "blue"],
goals: ["purchase", "refund"]
}
}
end
```
To complete a goal conversion, you do it like:
```ruby
ab_finished(link_color: "purchase")
```
Note that if you pass additional options, that should be a separate hash:
```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```
**NOTE:** This does not mean that a single experiment can complete more than one goal.
Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)
**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").
**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.
**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.
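A concrete sketch of the good example above (experiment, goal, and alternative names are all illustrative):
```ruby
# one experiment with two distinguishable goals
ab_test({ plan_order: ["plana_conversion", "planb_conversion"] }, "plan_a_first", "plan_b_first")

# later, when the visitor converts to one of the plans
ab_finished({ plan_order: "plana_conversion" }, reset: false)
```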
#### Combined Experiments
If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.
Configure like so:
```ruby
Split.configuration.experiments = {
:button_color_experiment => {
:alternatives => ["blue", "green"],
:combined_experiments => ["button_color_on_signup", "button_color_on_login"]
}
}
```
Starting the combined test starts all combined experiments:
```ruby
ab_combined_test(:button_color_experiment)
```
Finish each combined test as normal:
```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```
**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`
* In Sinatra, include the `CombinedExperimentsHelper`:
```ruby
helpers Split::CombinedExperimentsHelper
```
### DB failover solution
Because Redis has no automatic failover mechanism, you can switch on the
`db_failover` config option so that `ab_test` and `ab_finished` will not crash
in case of a db failure. `ab_test` always delivers alternative A (the first
one) in that case.
It's also possible to set a `db_failover_on_db_error` callback (proc),
for example to log these errors via `Rails.logger`.
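A minimal sketch using the related configuration options:
```ruby
Split.configure do |config|
  config.db_failover = true
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
  # optionally let a URL parameter override the served alternative while Redis is down
  config.db_failover_allow_parameter_override = true
end
```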
### Redis
You may want to change the Redis host and port Split connects to, or
set various other options at startup.
Split has a `redis` setter which can be given a string or a Redis
object. This means if you're already using Redis in your app, Split
can re-use the existing connection.
String: `Split.redis = 'redis://localhost:6379'`
Redis: `Split.redis = $redis`
For our Rails app we have a `config/initializers/split.rb` file where
we load `config/split.yml` by hand and set the Redis information
appropriately.
Here's our `config/split.yml`:
```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```
And our initializer:
```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```
### Redis Caching (v4.0+)
In some high-volume usage scenarios, Redis load can be incurred by repeated
fetches for fairly static data. Enabling caching will reduce this load.
```ruby
Split.configuration.cache = true
```
This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`
## Namespaces
If you're running multiple, separate instances of Split you may want
to namespace the keyspaces so they do not overlap. This is not unlike
the approach taken by many memcached clients.
This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)
library. To configure Split to use `Redis::Namespace`, do the following:
1. Add `redis-namespace` to your Gemfile:
```ruby
gem 'redis-namespace'
```
2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an
initializer):
```ruby
redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
```
## Outside of a Web Session
Split provides the Helper module to facilitate running experiments inside web sessions.
Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to
conduct experiments that are not tied to a web session.
```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(experiment: experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
trial.complete!
end
```
## Algorithms
By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test.
It is possible to specify static weights to favor certain alternatives.
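For example, an alternative can be weighted inline by passing it as a `{ name => weight }` hash (the experiment name here is illustrative):
```ruby
ab_test(:footer_color, { "red" => 3 }, "green", "blue") # "red" is served roughly 3x as often
```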
`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).
This algorithm will automatically weight the alternatives based on their relative performance,
choosing the better-performing ones more often as trials are completed.
`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal
participation across all alternatives. This algorithm will choose the alternative
with the fewest participants. In the event of multiple minimum participant alternatives
(i.e. starting a new "Block") the algorithm will choose a random alternative from
those minimum participant alternatives.
Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or overridden on a per-experiment basis using the experiments hash of the configuration file.
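A minimal sketch of a custom algorithm, assuming the same interface as the built-in ones (a `choose_alternative` class method that receives the experiment and returns one of its alternatives; the module name is illustrative):
```ruby
module Split
  module Algorithms
    module UniformSample
      # Pick uniformly at random, ignoring weights and past performance.
      def self.choose_alternative(experiment)
        experiment.alternatives.sample
      end
    end
  end
end
```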
To change the algorithm globally for all experiments, use the following in your initializer:
```ruby
Split.configure do |config|
config.algorithm = Split::Algorithms::Whiplash
end
```
## Extensions
- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.
## Screencast
Ryan Bates has produced an excellent 10 minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)
## Blogposts
* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)
## Backers
Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]
<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a>
## Sponsors
Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]
<a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a>
## Contribute
Please do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.
### Development
The source code is hosted at [GitHub](https://github.com/splitrb/split).
Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).
You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).
### Tests
Run the tests like this:
```
# Start a Redis server in another tab.
redis-server

bundle
rake spec
```
### A Note on Patches and Pull Requests
* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a
future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history.
(If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.
### Code of Conduct
Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
## Copyright
[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
10071112 | <NME> style.css
<BEF> html {
background: #efefef;
font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
font-size: 13px;
}
body {
padding: 0 10px;
margin: 10px auto 0;
}
.header {
background: #ededed;
background: -webkit-gradient(linear, left top, left bottom,
background: -webkit-gradient(linear, left top, left bottom,
color-stop(0%,#576a76),
color-stop(100%,#4d5256));
background: -moz-linear-gradient (top, #576a76 0%, #414e58 100%);
background: -webkit-linear-gradient(top, #576a76 0%, #414e58 100%);
background: -o-linear-gradient (top, #576a76 0%, #414e58 100%);
background: -ms-linear-gradient (top, #576a76 0%, #414e58 100%);
background: linear-gradient (top, #576a76 0%, #414e58 100%);
border-bottom: 1px solid #fff;
-moz-border-radius-topleft: 5px;
-webkit-border-top-left-radius: 5px;
-moz-border-radius-topright: 5px;
-webkit-border-top-right-radius:5px;
border-top-right-radius: 5px;
overflow:hidden;
padding: 10px 5%;
text-shadow:0 1px 0 #000;
}
.header h1 {
color: #eee;
float:left;
font-size:1.2em;
font-weight:normal;
margin:2px 30px 0 0;
}
.header ul li {
display: inline;
}
.header ul li a {
color: #eee;
text-decoration: none;
margin-right: 10px;
display: inline-block;
padding: 4px 8px;
-moz-border-radius: 10px;
-webkit-border-radius:10px;
border-radius: 10px;
}
.header ul li a:hover {
background: rgba(255,255,255,0.1);
}
.header ul li a:active {
-moz-box-shadow: inset 0 1px 0 rgba(0,0,0,0.2);
-webkit-box-shadow:inset 0 1px 0 rgba(0,0,0,0.2);
box-shadow: inset 0 1px 0 rgba(0,0,0,0.2);
}
.header ul li.current a {
background: rgba(255,255,255,0.1);
-moz-box-shadow: inset 0 1px 0 rgba(0,0,0,0.2);
-webkit-box-shadow:inset 0 1px 0 rgba(0,0,0,0.2);
box-shadow: inset 0 1px 0 rgba(0,0,0,0.2);
color: #fff;
}
.header p.environment {
clear: both;
padding: 10px 0 0 0;
color: #BBB;
font-style: italic;
float: right;
}
#main {
padding: 10px 5%;
background: #f9f9f9;
border:1px solid #ccc;
border-top:none;
-moz-box-shadow: 0 3px 10px rgba(0,0,0,0.2);
-webkit-box-shadow:0 3px 10px rgba(0,0,0,0.2);
box-shadow: 0 3px 10px rgba(0,0,0,0.2);
overflow: hidden;
}
#main .logo {
float: right;
margin: 10px;
}
#main span.hl {
background: #efefef;
padding: 2px;
}
#main h1 {
margin: 10px 0;
font-size: 190%;
font-weight: bold;
color: #0080FF;
}
#main table {
width: 100%;
margin:0 0 10px;
}
#main table tr td, #main table tr th {
border-bottom: 1px solid #ccc;
padding: 6px;
}
#main table tr th {
background: #efefef;
color: #888;
font-size: 80%;
text-transform:uppercase;
}
#main table tr td.no-data {
text-align: center;
padding: 40px 0;
color: #999;
font-style: italic;
font-size: 130%;
}
#main a {
color: #111;
}
#main p {
margin: 5px 0;
}
#main p.intro {
margin-bottom: 15px;
font-size: 85%;
color: #999;
margin-top: 0;
line-height: 1.3;
}
#main h1.wi {
margin-bottom: 5px;
}
#main p.sub {
font-size: 95%;
color: #999;
}
.experiment {
background:#fff;
border: 1px solid #eee;
border-bottom:none;
margin:30px 0;
}
.experiment_with_goal {
margin: -32px 0 30px 0;
}
.experiment .experiment-header {
background: #f4f4f4;
background: -webkit-gradient(linear, left top, left bottom,
color-stop(0%,#f4f4f4),
color-stop(100%,#e0e0e0));
background: -moz-linear-gradient (top, #f4f4f4 0%, #e0e0e0 100%);
background: -webkit-linear-gradient(top, #f4f4f4 0%, #e0e0e0 100%);
background: -o-linear-gradient (top, #f4f4f4 0%, #e0e0e0 100%);
background: -ms-linear-gradient (top, #f4f4f4 0%, #e0e0e0 100%);
background: linear-gradient (top, #f4f4f4 0%, #e0e0e0 100%);
border-top:1px solid #fff;
overflow:hidden;
padding:0 10px;
}
.experiment h2 {
color:#888;
margin: 12px 0 12px 0;
font-size: 1em;
font-weight:bold;
float:left;
text-shadow:0 1px 0 rgba(255,255,255,0.8);
}
.experiment h2 .goal {
font-style: italic;
}
.experiment h2 .version {
font-style:italic;
font-size:0.8em;
color:#bbb;
font-weight:normal;
}
.experiment table em{
font-style:italic;
font-size:0.9em;
color:#bbb;
}
.experiment table .totals td {
background: #eee;
font-weight: bold;
}
#footer {
padding: 10px 5%;
color: #999;
font-size: 85%;
line-height: 1.5;
padding-top: 10px;
}
#footer p a {
color: #999;
}
.inline-controls {
float:right;
}
.inline-controls small {
color: #888;
font-size: 11px;
}
.inline-controls form {
display: inline-block;
font-size: 10px;
line-height: 38px;
}
.inline-controls input {
margin-left: 10px;
}
.worse, .better {
color: #773F3F;
font-size: 10px;
font-weight:bold;
}
.better {
color: #408C48;
}
.experiment a.button, .experiment button, .experiment input[type="submit"] {
padding: 4px 10px;
overflow: hidden;
background: #d8dae0;
-moz-box-shadow: 0 1px 0 rgba(0,0,0,0.5);
-webkit-box-shadow:0 1px 0 rgba(0,0,0,0.5);
box-shadow: 0 1px 0 rgba(0,0,0,0.5);
border:none;
-moz-border-radius: 30px;
-webkit-border-radius:30px;
border-radius: 30px;
color:#2e3035;
cursor: pointer;
font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
text-decoration: none;
text-shadow:0 1px 0 rgba(255,255,255,0.8);
-moz-user-select: none;
-webkit-user-select:none;
user-select: none;
white-space: nowrap;
}
a.button:hover, button:hover, input[type="submit"]:hover,
a.button:focus, button:focus, input[type="submit"]:focus{
background:#bbbfc7;
}
a.button:active, button:active, input[type="submit"]:active{
-moz-box-shadow: inset 0 0 4px #484d57;
-webkit-box-shadow:inset 0 0 4px #484d57;
box-shadow: inset 0 0 4px #484d57;
position:relative;
top:1px;
}
a.button.red, button.red, input[type="submit"].red,
a.button.green, button.green, input[type="submit"].green {
color:#fff;
text-shadow:0 1px 0 rgba(0,0,0,0.4);
}
a.button.red, button.red, input[type="submit"].red {
background:#a56d6d;
}
a.button.red:hover, button.red:hover, input[type="submit"].red:hover,
a.button.red:focus, button.red:focus, input[type="submit"].red:focus {
background:#895C5C;
}
a.button.green, button.green, input[type="submit"].green {
background:#8daa92;
}
a.button.green:hover, button.green:hover, input[type="submit"].green:hover,
a.button.green:focus, button.green:focus, input[type="submit"].green:focus {
background:#768E7A;
}
.dashboard-controls input, .dashboard-controls select {
padding: 10px;
}
.dashboard-controls-bottom {
margin-top: 10px;
}
.pagination {
text-align: center;
font-size: 15px;
}
.pagination a, .paginaton span {
display: inline-block;
padding: 5px;
}
.divider {
display: inline-block;
margin-left: 10px;
}
<MSG> Merge pull request #69 from philnash/phil/fix-header-in-ff
Fixes header gradient in FF/Opera.
<DFF> @@ -15,11 +15,11 @@ body {
background: -webkit-gradient(linear, left top, left bottom,
color-stop(0%,#576a76),
color-stop(100%,#4d5256));
- background: -moz-linear-gradient (top, #576a76 0%, #414e58 100%);
+ background: -moz-linear-gradient(top, #576076 0%, #414e58 100%);
background: -webkit-linear-gradient(top, #576a76 0%, #414e58 100%);
- background: -o-linear-gradient (top, #576a76 0%, #414e58 100%);
- background: -ms-linear-gradient (top, #576a76 0%, #414e58 100%);
- background: linear-gradient (top, #576a76 0%, #414e58 100%);
+ background: -o-linear-gradient(top, #576a76 0%, #414e58 100%);
+ background: -ms-linear-gradient(top, #576a76 0%, #414e58 100%);
+ background: linear-gradient(top, #576a76 0%, #414e58 100%);
border-bottom: 1px solid #fff;
-moz-border-radius-topleft: 5px;
-webkit-border-top-left-radius: 5px;
| 4 | Merge pull request #69 from philnash/phil/fix-header-in-ff | 4 | .css | css | mit | splitrb/split |
10071113 | <NME> style.css
<BEF> html {
background: #efefef;
font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
font-size: 13px;
}
body {
padding: 0 10px;
margin: 10px auto 0;
}
.header {
background: #ededed;
background: -webkit-gradient(linear, left top, left bottom,
background: -webkit-gradient(linear, left top, left bottom,
color-stop(0%,#576a76),
color-stop(100%,#4d5256));
background: -moz-linear-gradient (top, #576a76 0%, #414e58 100%);
background: -webkit-linear-gradient(top, #576a76 0%, #414e58 100%);
background: -o-linear-gradient (top, #576a76 0%, #414e58 100%);
background: -ms-linear-gradient (top, #576a76 0%, #414e58 100%);
background: linear-gradient (top, #576a76 0%, #414e58 100%);
border-bottom: 1px solid #fff;
-moz-border-radius-topleft: 5px;
-webkit-border-top-left-radius: 5px;
-moz-border-radius-topright: 5px;
-webkit-border-top-right-radius:5px;
border-top-right-radius: 5px;
overflow:hidden;
padding: 10px 5%;
text-shadow:0 1px 0 #000;
}
.header h1 {
color: #eee;
float:left;
font-size:1.2em;
font-weight:normal;
margin:2px 30px 0 0;
}
.header ul li {
display: inline;
}
.header ul li a {
color: #eee;
text-decoration: none;
margin-right: 10px;
display: inline-block;
padding: 4px 8px;
-moz-border-radius: 10px;
-webkit-border-radius:10px;
border-radius: 10px;
}
.header ul li a:hover {
background: rgba(255,255,255,0.1);
}
.header ul li a:active {
-moz-box-shadow: inset 0 1px 0 rgba(0,0,0,0.2);
-webkit-box-shadow:inset 0 1px 0 rgba(0,0,0,0.2);
box-shadow: inset 0 1px 0 rgba(0,0,0,0.2);
}
.header ul li.current a {
background: rgba(255,255,255,0.1);
-moz-box-shadow: inset 0 1px 0 rgba(0,0,0,0.2);
-webkit-box-shadow:inset 0 1px 0 rgba(0,0,0,0.2);
box-shadow: inset 0 1px 0 rgba(0,0,0,0.2);
color: #fff;
}
.header p.environment {
clear: both;
padding: 10px 0 0 0;
color: #BBB;
font-style: italic;
float: right;
}
#main {
padding: 10px 5%;
background: #f9f9f9;
border:1px solid #ccc;
border-top:none;
-moz-box-shadow: 0 3px 10px rgba(0,0,0,0.2);
-webkit-box-shadow:0 3px 10px rgba(0,0,0,0.2);
box-shadow: 0 3px 10px rgba(0,0,0,0.2);
overflow: hidden;
}
#main .logo {
float: right;
margin: 10px;
}
#main span.hl {
background: #efefef;
padding: 2px;
}
#main h1 {
margin: 10px 0;
font-size: 190%;
font-weight: bold;
color: #0080FF;
}
#main table {
width: 100%;
margin:0 0 10px;
}
#main table tr td, #main table tr th {
border-bottom: 1px solid #ccc;
padding: 6px;
}
#main table tr th {
background: #efefef;
color: #888;
font-size: 80%;
text-transform:uppercase;
}
#main table tr td.no-data {
text-align: center;
padding: 40px 0;
color: #999;
font-style: italic;
font-size: 130%;
}
#main a {
color: #111;
}
#main p {
margin: 5px 0;
}
#main p.intro {
margin-bottom: 15px;
font-size: 85%;
color: #999;
margin-top: 0;
line-height: 1.3;
}
#main h1.wi {
margin-bottom: 5px;
}
#main p.sub {
font-size: 95%;
color: #999;
}
.experiment {
background:#fff;
border: 1px solid #eee;
border-bottom:none;
margin:30px 0;
}
.experiment_with_goal {
margin: -32px 0 30px 0;
}
.experiment .experiment-header {
background: #f4f4f4;
background: -webkit-gradient(linear, left top, left bottom,
color-stop(0%,#f4f4f4),
color-stop(100%,#e0e0e0));
background: -moz-linear-gradient (top, #f4f4f4 0%, #e0e0e0 100%);
background: -webkit-linear-gradient(top, #f4f4f4 0%, #e0e0e0 100%);
background: -o-linear-gradient (top, #f4f4f4 0%, #e0e0e0 100%);
background: -ms-linear-gradient (top, #f4f4f4 0%, #e0e0e0 100%);
background: linear-gradient (top, #f4f4f4 0%, #e0e0e0 100%);
border-top:1px solid #fff;
overflow:hidden;
padding:0 10px;
}
.experiment h2 {
color:#888;
margin: 12px 0 12px 0;
font-size: 1em;
font-weight:bold;
float:left;
text-shadow:0 1px 0 rgba(255,255,255,0.8);
}
.experiment h2 .goal {
font-style: italic;
}
.experiment h2 .version {
font-style:italic;
font-size:0.8em;
color:#bbb;
font-weight:normal;
}
.experiment table em{
font-style:italic;
font-size:0.9em;
color:#bbb;
}
.experiment table .totals td {
background: #eee;
font-weight: bold;
}
#footer {
padding: 10px 5%;
color: #999;
font-size: 85%;
line-height: 1.5;
padding-top: 10px;
}
#footer p a {
color: #999;
}
.inline-controls {
float:right;
}
.inline-controls small {
color: #888;
font-size: 11px;
}
.inline-controls form {
display: inline-block;
font-size: 10px;
line-height: 38px;
}
.inline-controls input {
margin-left: 10px;
}
.worse, .better {
color: #773F3F;
font-size: 10px;
font-weight:bold;
}
.better {
color: #408C48;
}
.experiment a.button, .experiment button, .experiment input[type="submit"] {
padding: 4px 10px;
overflow: hidden;
background: #d8dae0;
-moz-box-shadow: 0 1px 0 rgba(0,0,0,0.5);
-webkit-box-shadow:0 1px 0 rgba(0,0,0,0.5);
box-shadow: 0 1px 0 rgba(0,0,0,0.5);
border:none;
-moz-border-radius: 30px;
-webkit-border-radius:30px;
border-radius: 30px;
color:#2e3035;
cursor: pointer;
font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
text-decoration: none;
text-shadow:0 1px 0 rgba(255,255,255,0.8);
-moz-user-select: none;
-webkit-user-select:none;
user-select: none;
white-space: nowrap;
}
a.button:hover, button:hover, input[type="submit"]:hover,
a.button:focus, button:focus, input[type="submit"]:focus{
background:#bbbfc7;
}
a.button:active, button:active, input[type="submit"]:active{
-moz-box-shadow: inset 0 0 4px #484d57;
-webkit-box-shadow:inset 0 0 4px #484d57;
box-shadow: inset 0 0 4px #484d57;
position:relative;
top:1px;
}
a.button.red, button.red, input[type="submit"].red,
a.button.green, button.green, input[type="submit"].green {
color:#fff;
text-shadow:0 1px 0 rgba(0,0,0,0.4);
}
a.button.red, button.red, input[type="submit"].red {
background:#a56d6d;
}
a.button.red:hover, button.red:hover, input[type="submit"].red:hover,
a.button.red:focus, button.red:focus, input[type="submit"].red:focus {
background:#895C5C;
}
a.button.green, button.green, input[type="submit"].green {
background:#8daa92;
}
a.button.green:hover, button.green:hover, input[type="submit"].green:hover,
a.button.green:focus, button.green:focus, input[type="submit"].green:focus {
background:#768E7A;
}
.dashboard-controls input, .dashboard-controls select {
padding: 10px;
}
.dashboard-controls-bottom {
margin-top: 10px;
}
.pagination {
text-align: center;
font-size: 15px;
}
.pagination a, .paginaton span {
display: inline-block;
padding: 5px;
}
.divider {
display: inline-block;
margin-left: 10px;
}
<MSG> Merge pull request #69 from philnash/phil/fix-header-in-ff
Fixes header gradient in FF/Opera.
<DFF> @@ -15,11 +15,11 @@ body {
background: -webkit-gradient(linear, left top, left bottom,
color-stop(0%,#576a76),
color-stop(100%,#4d5256));
- background: -moz-linear-gradient (top, #576a76 0%, #414e58 100%);
+ background: -moz-linear-gradient(top, #576076 0%, #414e58 100%);
background: -webkit-linear-gradient(top, #576a76 0%, #414e58 100%);
- background: -o-linear-gradient (top, #576a76 0%, #414e58 100%);
- background: -ms-linear-gradient (top, #576a76 0%, #414e58 100%);
- background: linear-gradient (top, #576a76 0%, #414e58 100%);
+ background: -o-linear-gradient(top, #576a76 0%, #414e58 100%);
+ background: -ms-linear-gradient(top, #576a76 0%, #414e58 100%);
+ background: linear-gradient(top, #576a76 0%, #414e58 100%);
border-bottom: 1px solid #fff;
-moz-border-radius-topleft: 5px;
-webkit-border-top-left-radius: 5px;
| 4 | Merge pull request #69 from philnash/phil/fix-header-in-ff | 4 | .css | css | mit | splitrb/split |
10071114 | <NME> style.css
<BEF> html {
background: #efefef;
font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
font-size: 13px;
}
body {
padding: 0 10px;
margin: 10px auto 0;
}
.header {
background: #ededed;
background: -webkit-gradient(linear, left top, left bottom,
background: -webkit-gradient(linear, left top, left bottom,
color-stop(0%,#576a76),
color-stop(100%,#4d5256));
background: -moz-linear-gradient (top, #576a76 0%, #414e58 100%);
background: -webkit-linear-gradient(top, #576a76 0%, #414e58 100%);
background: -o-linear-gradient (top, #576a76 0%, #414e58 100%);
background: -ms-linear-gradient (top, #576a76 0%, #414e58 100%);
background: linear-gradient (top, #576a76 0%, #414e58 100%);
border-bottom: 1px solid #fff;
-moz-border-radius-topleft: 5px;
-webkit-border-top-left-radius: 5px;
-moz-border-radius-topright: 5px;
-webkit-border-top-right-radius:5px;
border-top-right-radius: 5px;
overflow:hidden;
padding: 10px 5%;
text-shadow:0 1px 0 #000;
}
.header h1 {
color: #eee;
float:left;
font-size:1.2em;
font-weight:normal;
margin:2px 30px 0 0;
}
.header ul li {
display: inline;
}
.header ul li a {
color: #eee;
text-decoration: none;
margin-right: 10px;
display: inline-block;
padding: 4px 8px;
-moz-border-radius: 10px;
-webkit-border-radius:10px;
border-radius: 10px;
}
.header ul li a:hover {
background: rgba(255,255,255,0.1);
}
.header ul li a:active {
-moz-box-shadow: inset 0 1px 0 rgba(0,0,0,0.2);
-webkit-box-shadow:inset 0 1px 0 rgba(0,0,0,0.2);
box-shadow: inset 0 1px 0 rgba(0,0,0,0.2);
}
.header ul li.current a {
background: rgba(255,255,255,0.1);
-moz-box-shadow: inset 0 1px 0 rgba(0,0,0,0.2);
-webkit-box-shadow:inset 0 1px 0 rgba(0,0,0,0.2);
box-shadow: inset 0 1px 0 rgba(0,0,0,0.2);
color: #fff;
}
.header p.environment {
clear: both;
padding: 10px 0 0 0;
color: #BBB;
font-style: italic;
float: right;
}
#main {
padding: 10px 5%;
background: #f9f9f9;
border:1px solid #ccc;
border-top:none;
-moz-box-shadow: 0 3px 10px rgba(0,0,0,0.2);
-webkit-box-shadow:0 3px 10px rgba(0,0,0,0.2);
box-shadow: 0 3px 10px rgba(0,0,0,0.2);
overflow: hidden;
}
#main .logo {
float: right;
margin: 10px;
}
#main span.hl {
background: #efefef;
padding: 2px;
}
#main h1 {
margin: 10px 0;
font-size: 190%;
font-weight: bold;
color: #0080FF;
}
#main table {
width: 100%;
margin:0 0 10px;
}
#main table tr td, #main table tr th {
border-bottom: 1px solid #ccc;
padding: 6px;
}
#main table tr th {
background: #efefef;
color: #888;
font-size: 80%;
text-transform:uppercase;
}
#main table tr td.no-data {
text-align: center;
padding: 40px 0;
color: #999;
font-style: italic;
font-size: 130%;
}
#main a {
color: #111;
}
#main p {
margin: 5px 0;
}
#main p.intro {
margin-bottom: 15px;
font-size: 85%;
color: #999;
margin-top: 0;
line-height: 1.3;
}
#main h1.wi {
margin-bottom: 5px;
}
#main p.sub {
font-size: 95%;
color: #999;
}
.experiment {
background:#fff;
border: 1px solid #eee;
border-bottom:none;
margin:30px 0;
}
.experiment_with_goal {
margin: -32px 0 30px 0;
}
.experiment .experiment-header {
background: #f4f4f4;
background: -webkit-gradient(linear, left top, left bottom,
color-stop(0%,#f4f4f4),
color-stop(100%,#e0e0e0));
background: -moz-linear-gradient (top, #f4f4f4 0%, #e0e0e0 100%);
background: -webkit-linear-gradient(top, #f4f4f4 0%, #e0e0e0 100%);
background: -o-linear-gradient (top, #f4f4f4 0%, #e0e0e0 100%);
background: -ms-linear-gradient (top, #f4f4f4 0%, #e0e0e0 100%);
background: linear-gradient (top, #f4f4f4 0%, #e0e0e0 100%);
border-top:1px solid #fff;
overflow:hidden;
padding:0 10px;
}
.experiment h2 {
color:#888;
margin: 12px 0 12px 0;
font-size: 1em;
font-weight:bold;
float:left;
text-shadow:0 1px 0 rgba(255,255,255,0.8);
}
.experiment h2 .goal {
font-style: italic;
}
.experiment h2 .version {
font-style:italic;
font-size:0.8em;
color:#bbb;
font-weight:normal;
}
.experiment table em{
font-style:italic;
font-size:0.9em;
color:#bbb;
}
.experiment table .totals td {
background: #eee;
font-weight: bold;
}
#footer {
padding: 10px 5%;
color: #999;
font-size: 85%;
line-height: 1.5;
padding-top: 10px;
}
#footer p a {
color: #999;
}
.inline-controls {
float:right;
}
.inline-controls small {
color: #888;
font-size: 11px;
}
.inline-controls form {
display: inline-block;
font-size: 10px;
line-height: 38px;
}
.inline-controls input {
margin-left: 10px;
}
.worse, .better {
color: #773F3F;
font-size: 10px;
font-weight:bold;
}
.better {
color: #408C48;
}
.experiment a.button, .experiment button, .experiment input[type="submit"] {
padding: 4px 10px;
overflow: hidden;
background: #d8dae0;
-moz-box-shadow: 0 1px 0 rgba(0,0,0,0.5);
-webkit-box-shadow:0 1px 0 rgba(0,0,0,0.5);
box-shadow: 0 1px 0 rgba(0,0,0,0.5);
border:none;
-moz-border-radius: 30px;
-webkit-border-radius:30px;
border-radius: 30px;
color:#2e3035;
cursor: pointer;
font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
text-decoration: none;
text-shadow:0 1px 0 rgba(255,255,255,0.8);
-moz-user-select: none;
-webkit-user-select:none;
user-select: none;
white-space: nowrap;
}
a.button:hover, button:hover, input[type="submit"]:hover,
a.button:focus, button:focus, input[type="submit"]:focus{
background:#bbbfc7;
}
a.button:active, button:active, input[type="submit"]:active{
-moz-box-shadow: inset 0 0 4px #484d57;
-webkit-box-shadow:inset 0 0 4px #484d57;
box-shadow: inset 0 0 4px #484d57;
position:relative;
top:1px;
}
a.button.red, button.red, input[type="submit"].red,
a.button.green, button.green, input[type="submit"].green {
color:#fff;
text-shadow:0 1px 0 rgba(0,0,0,0.4);
}
a.button.red, button.red, input[type="submit"].red {
background:#a56d6d;
}
a.button.red:hover, button.red:hover, input[type="submit"].red:hover,
a.button.red:focus, button.red:focus, input[type="submit"].red:focus {
background:#895C5C;
}
a.button.green, button.green, input[type="submit"].green {
background:#8daa92;
}
a.button.green:hover, button.green:hover, input[type="submit"].green:hover,
a.button.green:focus, button.green:focus, input[type="submit"].green:focus {
background:#768E7A;
}
.dashboard-controls input, .dashboard-controls select {
padding: 10px;
}
.dashboard-controls-bottom {
margin-top: 10px;
}
.pagination {
text-align: center;
font-size: 15px;
}
.pagination a, .paginaton span {
display: inline-block;
padding: 5px;
}
.divider {
display: inline-block;
margin-left: 10px;
}
<MSG> Merge pull request #69 from philnash/phil/fix-header-in-ff
Fixes header gradient in FF/Opera.
<DFF> @@ -15,11 +15,11 @@ body {
background: -webkit-gradient(linear, left top, left bottom,
color-stop(0%,#576a76),
color-stop(100%,#4d5256));
- background: -moz-linear-gradient (top, #576a76 0%, #414e58 100%);
+ background: -moz-linear-gradient(top, #576076 0%, #414e58 100%);
background: -webkit-linear-gradient(top, #576a76 0%, #414e58 100%);
- background: -o-linear-gradient (top, #576a76 0%, #414e58 100%);
- background: -ms-linear-gradient (top, #576a76 0%, #414e58 100%);
- background: linear-gradient (top, #576a76 0%, #414e58 100%);
+ background: -o-linear-gradient(top, #576a76 0%, #414e58 100%);
+ background: -ms-linear-gradient(top, #576a76 0%, #414e58 100%);
+ background: linear-gradient(top, #576a76 0%, #414e58 100%);
border-bottom: 1px solid #fff;
-moz-border-radius-topleft: 5px;
-webkit-border-top-left-radius: 5px;
| 4 | Merge pull request #69 from philnash/phil/fix-header-in-ff | 4 | .css | css | mit | splitrb/split |
10071115 | <NME> setup.py
<BEF> #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import codecs
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='chishop',
version=__import__('djangopypi').__version__,
description='Simple PyPI server written in Django.',
author='Ask Solem',
author_email='[email protected]',
packages=["djangopypi"],
install_requires=[
'django>=1.0',
],
'django-registration>0.7',
],
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Framework :: Django",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: BSD License",
"Topic :: System :: Software Distribution",
"Programming Language :: Python",
],
long_description=codecs.open('README', "r", "utf-8").read(),
)
<MSG> Merge branch 'master' of [email protected]:ask/chishop
<DFF> @@ -10,13 +10,64 @@ except ImportError:
use_setuptools()
from setuptools import setup, find_packages
+from distutils.command.install_data import install_data
+from distutils.command.install import INSTALL_SCHEMES
+import sys
+
+djangopypi = __import__('djangopypi', {}, {}, [''])
+
+packages, data_files = [], []
+root_dir = os.path.dirname(__file__)
+if root_dir != '':
+ os.chdir(root_dir)
+djangopypi_dir = "djangopypi"
+
+def osx_install_data(install_data):
+ def finalize_options(self):
+ self.set_undefined_options("install", ("install_lib", "install_dir"))
+ install_data.finalize_options(self)
+
+#if sys.platform == "darwin":
+# cmdclasses = {'install_data': osx_install_data}
+#else:
+# cmdclasses = {'install_data': install_data}
+
+
+def fullsplit(path, result=None):
+ if result is None:
+ result = []
+ head, tail = os.path.split(path)
+ if head == '':
+ return [tail] + result
+ if head == path:
+ return result
+ return fullsplit(head, [tail] + result)
+
+
+for scheme in INSTALL_SCHEMES.values():
+ scheme['data'] = scheme['purelib']
+
+
+for dirpath, dirnames, filenames in os.walk(djangopypi_dir):
+ # Ignore dirnames that start with '.'
+ for i, dirname in enumerate(dirnames):
+ if dirname.startswith("."): del dirnames[i]
+ for filename in filenames:
+ if filename.endswith(".py"):
+ packages.append('.'.join(fullsplit(dirpath)))
+ else:
+ data_files.append([dirpath, [os.path.join(dirpath, f) for f in
+ filenames]])
setup(
name='chishop',
- version=__import__('djangopypi').__version__,
+ version=djangopypi.__version__,
description='Simple PyPI server written in Django.',
author='Ask Solem',
author_email='[email protected]',
- packages=["djangopypi"],
+ packages=packages,
+ url="http://ask.github.com/chishop",
+ zip_safe=False,
+ data_files=data_files,
install_requires=[
'django>=1.0',
],
| 53 | Merge branch 'master' of [email protected]:ask/chishop | 2 | .py | py | bsd-3-clause | ask/chishop |
10071116 | <NME> configuration.rb
<BEF> # frozen_string_literal: true
module Split
class Configuration
attr_accessor :ignore_ip_addresses
attr_accessor :ignore_filter
'libwww-perl' => 'Perl client-server library loved by script kids',
'lwp-trivial' => 'Another Perl library loved by script kids',
'msnbot' => 'Microsoft bot',
'SiteUpTime' => 'Site monitoring services',
'Slurp' => 'Yahoo spider',
'WordPress' => 'WordPress spider',
'ZIBB' => 'ZIBB spider',
attr_accessor :persistence_cookie_domain
attr_accessor :algorithm
attr_accessor :store_override
attr_accessor :start_manually
attr_accessor :reset_manually
attr_accessor :on_trial
attr_accessor :on_trial_choose
attr_accessor :on_trial_complete
attr_accessor :on_experiment_reset
attr_accessor :on_experiment_delete
attr_accessor :on_before_experiment_reset
attr_accessor :on_experiment_winner_choose
attr_accessor :on_before_experiment_delete
attr_accessor :include_rails_helper
attr_accessor :beta_probability_simulations
attr_accessor :winning_alternative_recalculation_interval
attr_accessor :redis
attr_accessor :dashboard_pagination_default_per_page
attr_accessor :cache
attr_reader :experiments
attr_writer :bots
attr_writer :robot_regex
def bots
@bots ||= {
# Indexers
"AdsBot-Google" => "Google Adwords",
"Baidu" => "Chinese search engine",
"Baiduspider" => "Chinese search engine",
"bingbot" => "Microsoft bing bot",
"Butterfly" => "Topsy Labs",
"Gigabot" => "Gigabot spider",
"Googlebot" => "Google spider",
"MJ12bot" => "Majestic-12 spider",
"msnbot" => "Microsoft bot",
"rogerbot" => "SeoMoz spider",
"PaperLiBot" => "PaperLi is another content curation service",
"Slurp" => "Yahoo spider",
"Sogou" => "Chinese search engine",
"spider" => "generic web spider",
"UnwindFetchor" => "Gnip crawler",
"WordPress" => "WordPress spider",
"YandexAccessibilityBot" => "Yandex accessibility spider",
"YandexBot" => "Yandex spider",
"YandexMobileBot" => "Yandex mobile spider",
"ZIBB" => "ZIBB spider",
# HTTP libraries
"Apache-HttpClient" => "Java http library",
"AppEngine-Google" => "Google App Engine",
"curl" => "curl unix CLI http client",
"ColdFusion" => "ColdFusion http library",
"EventMachine HttpClient" => "Ruby http library",
"Go http package" => "Go http library",
"Go-http-client" => "Go http library",
"Java" => "Generic Java http library",
"libwww-perl" => "Perl client-server library loved by script kids",
"lwp-trivial" => "Another Perl library loved by script kids",
"Python-urllib" => "Python http library",
"PycURL" => "Python http library",
"Test Certificate Info" => "C http library?",
"Typhoeus" => "Ruby http library",
"Wget" => "wget unix CLI http client",
# URL expanders / previewers
"awe.sm" => "Awe.sm URL expander",
"bitlybot" => "bit.ly bot",
"[email protected]" => "Linkfluence bot",
"facebookexternalhit" => "facebook bot",
"Facebot" => "Facebook crawler",
"Feedfetcher-Google" => "Google Feedfetcher",
"https://developers.google.com/+/web/snippet" => "Google+ Snippet Fetcher",
"LinkedInBot" => "LinkedIn bot",
"LongURL" => "URL expander service",
"NING" => "NING - Yet Another Twitter Swarmer",
"Pinterestbot" => "Pinterest Bot",
"redditbot" => "Reddit Bot",
"ShortLinkTranslate" => "Link shortener",
"Slackbot" => "Slackbot link expander",
"TweetmemeBot" => "TweetMeMe Crawler",
"Twitterbot" => "Twitter URL expander",
"UnwindFetch" => "Gnip URL expander",
"vkShare" => "VKontake Sharer",
# Uptime monitoring
"check_http" => "Nagios monitor",
"GoogleStackdriverMonitoring" => "Google Cloud monitor",
"NewRelicPinger" => "NewRelic monitor",
"Panopta" => "Monitoring service",
"Pingdom" => "Pingdom monitoring",
"SiteUptime" => "Site monitoring services",
"UptimeRobot" => "Monitoring service",
# ???
"DigitalPersona Fingerprint Software" => "HP Fingerprint scanner",
"ShowyouBot" => "Showyou iOS app spider",
"ZyBorg" => "Zyborg? Hmmm....",
"ELB-HealthChecker" => "ELB Health Check"
}
end
def experiments=(experiments)
raise InvalidExperimentsFormatError.new("Experiments must be a Hash") unless experiments.respond_to?(:keys)
@experiments = experiments
end
def disabled?
!enabled
end
def experiment_for(name)
if normalized_experiments
# TODO symbols
normalized_experiments[name.to_sym]
end
end
def metrics
return @metrics if defined?(@metrics)
@metrics = {}
if self.experiments
self.experiments.each do |key, value|
metrics = value_for(value, :metric) rescue nil
Array(metrics).each do |metric_name|
if metric_name
@metrics[metric_name.to_sym] ||= []
@metrics[metric_name.to_sym] << Split::Experiment.new(key)
end
end
end
end
@metrics
end
def normalized_experiments
return nil if @experiments.nil?
experiment_config = {}
@experiments.keys.each do |name|
experiment_config[name.to_sym] = {}
end
@experiments.each do |experiment_name, settings|
alternatives = if (alts = value_for(settings, :alternatives))
normalize_alternatives(alts)
end
experiment_data = {
alternatives: alternatives,
goals: value_for(settings, :goals),
metadata: value_for(settings, :metadata),
algorithm: value_for(settings, :algorithm),
resettable: value_for(settings, :resettable)
}
experiment_data.each do |name, value|
experiment_config[experiment_name.to_sym][name] = value if value != nil
end
end
experiment_config
end
def normalize_alternatives(alternatives)
given_probability, num_with_probability = alternatives.inject([0, 0]) do |a, v|
p, n = a
if percent = value_for(v, :percent)
[p + percent, n + 1]
else
a
end
end
num_without_probability = alternatives.length - num_with_probability
unassigned_probability = ((100.0 - given_probability) / num_without_probability / 100.0)
if num_with_probability.nonzero?
alternatives = alternatives.map do |v|
if (name = value_for(v, :name)) && (percent = value_for(v, :percent))
{ name => percent / 100.0 }
elsif name = value_for(v, :name)
{ name => unassigned_probability }
else
{ v => unassigned_probability }
end
end
[alternatives.shift, alternatives]
else
alternatives = alternatives.dup
[alternatives.shift, alternatives]
end
end
def robot_regex
@robot_regex ||= /\b(?:#{escaped_bots.join('|')})\b|\A\W*\z/i
end
def initialize
@ignore_ip_addresses = []
@ignore_filter = proc { |request| is_robot? || is_ignored_ip_address? }
@db_failover = false
@db_failover_on_db_error = proc { |error| } # e.g. use Rails logger here
@on_experiment_reset = proc { |experiment| }
@on_experiment_delete = proc { |experiment| }
@on_before_experiment_reset = proc { |experiment| }
@on_before_experiment_delete = proc { |experiment| }
@on_experiment_winner_choose = proc { |experiment| }
@db_failover_allow_parameter_override = false
@allow_multiple_experiments = false
@enabled = true
@experiments = {}
@persistence = Split::Persistence::SessionAdapter
@persistence_cookie_length = 31536000 # One year from now
@persistence_cookie_domain = nil
@algorithm = Split::Algorithms::WeightedSample
@include_rails_helper = true
@beta_probability_simulations = 10000
@winning_alternative_recalculation_interval = 60 * 60 * 24 # 1 day
@redis = ENV.fetch(ENV.fetch("REDIS_PROVIDER", "REDIS_URL"), "redis://localhost:6379")
@dashboard_pagination_default_per_page = 10
end
private
def value_for(hash, key)
if hash.kind_of?(Hash)
hash.has_key?(key.to_s) ? hash[key.to_s] : hash[key.to_sym]
end
end
def escaped_bots
bots.map { |key, _| Regexp.escape(key) }
end
end
end
<MSG> corrected typo in robot hash
<DFF> @@ -7,7 +7,7 @@ module Split
'libwww-perl' => 'Perl client-server library loved by script kids',
'lwp-trivial' => 'Another Perl library loved by script kids',
'msnbot' => 'Microsoft bot',
- 'SiteUpTime' => 'Site monitoring services',
+ 'SiteUptime' => 'Site monitoring services',
'Slurp' => 'Yahoo spider',
'WordPress' => 'WordPress spider',
'ZIBB' => 'ZIBB spider',
| 1 | corrected typo in robot hash | 1 | .rb | rb | mit | splitrb/split |
10071117 | <NME> configuration.rb
<BEF> # frozen_string_literal: true
module Split
class Configuration
attr_accessor :ignore_ip_addresses
attr_accessor :ignore_filter
attr_accessor :db_failover
attr_accessor :db_failover_on_db_error
attr_accessor :db_failover_allow_parameter_override
attr_accessor :allow_multiple_experiments
attr_accessor :enabled
attr_accessor :persistence
attr_accessor :persistence_cookie_length
attr_accessor :persistence_cookie_domain
attr_accessor :algorithm
attr_accessor :store_override
attr_accessor :start_manually
attr_accessor :reset_manually
attr_accessor :on_trial
attr_accessor :on_trial_choose
attr_accessor :on_trial_complete
attr_accessor :on_experiment_reset
attr_accessor :on_experiment_delete
attr_accessor :on_before_experiment_reset
attr_accessor :on_experiment_winner_choose
attr_accessor :on_before_experiment_delete
attr_accessor :include_rails_helper
attr_accessor :beta_probability_simulations
attr_accessor :winning_alternative_recalculation_interval
attr_accessor :redis
attr_accessor :dashboard_pagination_default_per_page
attr_accessor :cache
attr_reader :experiments
attr_writer :bots
attr_writer :robot_regex
def bots
@bots ||= {
# Indexers
"AdsBot-Google" => "Google Adwords",
"Baidu" => "Chinese search engine",
"Baiduspider" => "Chinese search engine",
"bingbot" => "Microsoft bing bot",
"Butterfly" => "Topsy Labs",
"Gigabot" => "Gigabot spider",
"Googlebot" => "Google spider",
"MJ12bot" => "Majestic-12 spider",
"msnbot" => "Microsoft bot",
"rogerbot" => "SeoMoz spider",
"PaperLiBot" => "PaperLi is another content curation service",
"Slurp" => "Yahoo spider",
"Sogou" => "Chinese search engine",
"spider" => "generic web spider",
"UnwindFetchor" => "Gnip crawler",
"WordPress" => "WordPress spider",
"YandexAccessibilityBot" => "Yandex accessibility spider",
"YandexBot" => "Yandex spider",
"YandexMobileBot" => "Yandex mobile spider",
"ZIBB" => "ZIBB spider",
# HTTP libraries
"Apache-HttpClient" => "Java http library",
"AppEngine-Google" => "Google App Engine",
"curl" => "curl unix CLI http client",
"ColdFusion" => "ColdFusion http library",
"EventMachine HttpClient" => "Ruby http library",
"Go http package" => "Go http library",
"Go-http-client" => "Go http library",
"Java" => "Generic Java http library",
"libwww-perl" => "Perl client-server library loved by script kids",
"lwp-trivial" => "Another Perl library loved by script kids",
"Python-urllib" => "Python http library",
"PycURL" => "Python http library",
"Test Certificate Info" => "C http library?",
"Typhoeus" => "Ruby http library",
"Wget" => "wget unix CLI http client",
# URL expanders / previewers
"awe.sm" => "Awe.sm URL expander",
"bitlybot" => "bit.ly bot",
"[email protected]" => "Linkfluence bot",
"facebookexternalhit" => "facebook bot",
"Facebot" => "Facebook crawler",
"Feedfetcher-Google" => "Google Feedfetcher",
"https://developers.google.com/+/web/snippet" => "Google+ Snippet Fetcher",
"LinkedInBot" => "LinkedIn bot",
"LongURL" => "URL expander service",
"NING" => "NING - Yet Another Twitter Swarmer",
"Pinterestbot" => "Pinterest Bot",
"redditbot" => "Reddit Bot",
"ShortLinkTranslate" => "Link shortener",
"Slackbot" => "Slackbot link expander",
"TweetmemeBot" => "TweetMeMe Crawler",
"Twitterbot" => "Twitter URL expander",
"UnwindFetch" => "Gnip URL expander",
"vkShare" => "VKontake Sharer",
# Uptime monitoring
"check_http" => "Nagios monitor",
"GoogleStackdriverMonitoring" => "Google Cloud monitor",
"NewRelicPinger" => "NewRelic monitor",
"Panopta" => "Monitoring service",
"Pingdom" => "Pingdom monitoring",
"SiteUptime" => "Site monitoring services",
"UptimeRobot" => "Monitoring service",
# ???
"DigitalPersona Fingerprint Software" => "HP Fingerprint scanner",
"ShowyouBot" => "Showyou iOS app spider",
"ZyBorg" => "Zyborg? Hmmm....",
"ELB-HealthChecker" => "ELB Health Check"
}
end
def experiments=(experiments)
raise InvalidExperimentsFormatError.new("Experiments must be a Hash") unless experiments.respond_to?(:keys)
@experiments = experiments
end
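# Hypothetical configuration shape accepted here (illustrative only):
#   Split.configuration.experiments = {
#     "link_color" => {
#       "alternatives" => [{ "name" => "blue", "percent" => 60 }, "red"],
#       "resettable" => false
#     }
#   }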
def disabled?
!enabled
end
def experiment_for(name)
if normalized_experiments
# TODO symbols
normalized_experiments[name.to_sym]
end
end
def metrics
return @metrics if defined?(@metrics)
@metrics = {}
if self.experiments
self.experiments.each do |key, value|
metrics = value_for(value, :metric) rescue nil
Array(metrics).each do |metric_name|
if metric_name
@metrics[metric_name.to_sym] ||= []
@metrics[metric_name.to_sym] << Split::Experiment.new(key)
end
end
end
end
@metrics
end
def normalized_experiments
return nil if @experiments.nil?
experiment_config = {}
@experiments.keys.each do |name|
experiment_config[name.to_sym] = {}
end
@experiments.each do |experiment_name, settings|
alternatives = if (alts = value_for(settings, :alternatives))
normalize_alternatives(alts)
end
experiment_data = {
alternatives: alternatives,
goals: value_for(settings, :goals),
metadata: value_for(settings, :metadata),
algorithm: value_for(settings, :algorithm),
resettable: value_for(settings, :resettable)
}
experiment_data.each do |name, value|
experiment_config[experiment_name.to_sym][name] = value if value != nil
end
end
experiment_config
end
def normalize_alternatives(alternatives)
given_probability, num_with_probability = alternatives.inject([0, 0]) do |a, v|
p, n = a
if percent = value_for(v, :percent)
[p + percent, n + 1]
else
a
end
end
num_without_probability = alternatives.length - num_with_probability
unassigned_probability = ((100.0 - given_probability) / num_without_probability / 100.0)
if num_with_probability.nonzero?
alternatives = alternatives.map do |v|
if (name = value_for(v, :name)) && (percent = value_for(v, :percent))
{ name => percent / 100.0 }
elsif name = value_for(v, :name)
{ name => unassigned_probability }
else
{ v => unassigned_probability }
end
end
[alternatives.shift, alternatives]
else
alternatives = alternatives.dup
[alternatives.shift, alternatives]
end
end
def robot_regex
@robot_regex ||= /\b(?:#{escaped_bots.join('|')})\b|\A\W*\z/i
end
def initialize
@ignore_ip_addresses = []
@ignore_filter = proc { |request| is_robot? || is_ignored_ip_address? }
@db_failover = false
@db_failover_on_db_error = proc { |error| } # e.g. use Rails logger here
@on_experiment_reset = proc { |experiment| }
@on_experiment_delete = proc { |experiment| }
@on_before_experiment_reset = proc { |experiment| }
@on_before_experiment_delete = proc { |experiment| }
@on_experiment_winner_choose = proc { |experiment| }
@db_failover_allow_parameter_override = false
@allow_multiple_experiments = false
@enabled = true
@experiments = {}
@persistence = Split::Persistence::SessionAdapter
@persistence_cookie_length = 31536000 # One year from now
@persistence_cookie_domain = nil
@algorithm = Split::Algorithms::WeightedSample
@include_rails_helper = true
@beta_probability_simulations = 10000
@winning_alternative_recalculation_interval = 60 * 60 * 24 # 1 day
@redis = ENV.fetch(ENV.fetch("REDIS_PROVIDER", "REDIS_URL"), "redis://localhost:6379")
@dashboard_pagination_default_per_page = 10
end
private
def value_for(hash, key)
if hash.kind_of?(Hash)
hash.has_key?(key.to_s) ? hash[key.to_s] : hash[key.to_sym]
end
end
def escaped_bots
bots.map { |key, _| Regexp.escape(key) }
end
end
end
<MSG> corrected typo in robot hash
<DFF> @@ -7,7 +7,7 @@ module Split
'libwww-perl' => 'Perl client-server library loved by script kids',
'lwp-trivial' => 'Another Perl library loved by script kids',
'msnbot' => 'Microsoft bot',
- 'SiteUpTime' => 'Site monitoring services',
+ 'SiteUptime' => 'Site monitoring services',
'Slurp' => 'Yahoo spider',
'WordPress' => 'WordPress spider',
'ZIBB' => 'ZIBB spider',
| 1 | corrected typo in robot hash | 1 | .rb | rb | mit | splitrb/split |
10071119 | <NME> experiment.rb
<BEF> # frozen_string_literal: true
module Split
class Experiment
attr_accessor :name
attr_accessor :goals
attr_accessor :alternative_probabilities
attr_accessor :metadata
attr_reader :alternatives
attr_reader :resettable
DEFAULT_OPTIONS = {
resettable: true
}
def self.find(name)
Split.cache(:experiments, name) do
return unless Split.redis.exists?(name)
Experiment.new(name).tap { |exp| exp.load_from_redis }
end
end
def initialize(name, options = {})
options = DEFAULT_OPTIONS.merge(options)
@name = name.to_s
extract_alternatives_from_options(options)
end
def self.finished_key(key)
"#{key}:finished"
end
def set_alternatives_and_options(options)
options_with_defaults = DEFAULT_OPTIONS.merge(
options.reject { |k, v| v.nil? }
)
self.alternatives = options_with_defaults[:alternatives]
self.goals = options_with_defaults[:goals]
self.resettable = options_with_defaults[:resettable]
self.algorithm = options_with_defaults[:algorithm]
self.metadata = options_with_defaults[:metadata]
end
def extract_alternatives_from_options(options)
alts = options[:alternatives] || []
if alts.length == 1
if alts[0].is_a? Hash
alts = alts[0].map { |k, v| { k => v } }
end
end
if alts.empty?
exp_config = Split.configuration.experiment_for(name)
if exp_config
alts = load_alternatives_from_configuration
options[:goals] = Split::GoalsCollection.new(@name).load_from_configuration
options[:metadata] = load_metadata_from_configuration
options[:resettable] = exp_config[:resettable]
options[:algorithm] = exp_config[:algorithm]
end
end
options[:alternatives] = alts
set_alternatives_and_options(options)
# calculate probability that each alternative is the winner
@alternative_probabilities = {}
alts
end
def save
validate!
if new_record?
start unless Split.configuration.start_manually
persist_experiment_configuration
elsif experiment_configuration_has_changed?
reset unless Split.configuration.reset_manually
persist_experiment_configuration
end
redis.hmset(experiment_config_key, :resettable, resettable.to_s,
:algorithm, algorithm.to_s)
self
end
def validate!
if @alternatives.empty? && Split.configuration.experiment_for(@name).nil?
raise ExperimentNotFound.new("Experiment #{@name} not found")
end
@alternatives.each { |a| a.validate! }
goals_collection.validate!
end
def new_record?
ExperimentCatalog.find(name).nil?
end
def ==(obj)
self.name == obj.name
end
def [](name)
alternatives.find { |a| a.name == name }
end
def algorithm
@algorithm ||= Split.configuration.algorithm
end
def algorithm=(algorithm)
@algorithm = algorithm.is_a?(String) ? algorithm.constantize : algorithm
end
def resettable=(resettable)
@resettable = resettable.is_a?(String) ? resettable == "true" : resettable
end
def alternatives=(alts)
@alternatives = alts.map do |alternative|
if alternative.kind_of?(Split::Alternative)
alternative
else
Split::Alternative.new(alternative, @name)
end
end
end
def winner
Split.cache(:experiment_winner, name) do
experiment_winner = redis.hget(:experiment_winner, name)
if experiment_winner
Split::Alternative.new(experiment_winner, name)
else
nil
end
end
end
def has_winner?
return @has_winner if defined? @has_winner
@has_winner = !winner.nil?
end
def winner=(winner_name)
redis.hset(:experiment_winner, name, winner_name.to_s)
@has_winner = true
Split.configuration.on_experiment_winner_choose.call(self)
end
def participant_count
alternatives.inject(0) { |sum, a| sum + a.participant_count }
end
def control
alternatives.first
end
def reset_winner
redis.hdel(:experiment_winner, name)
@has_winner = false
Split::Cache.clear_key(@name)
end
def start
redis.hset(:experiment_start_times, @name, Time.now.to_i)
end
def start_time
Split.cache(:experiment_start_times, @name) do
t = redis.hget(:experiment_start_times, @name)
if t
# Check if stored time is an integer
if t =~ /^[-+]?[0-9]+$/
Time.at(t.to_i)
else
Time.parse(t)
end
end
end
end
def next_alternative
winner || random_alternative
end
def random_alternative
if alternatives.length > 1
algorithm.choose_alternative(self)
else
alternatives.first
end
end
def version
@version ||= (redis.get("#{name}:version").to_i || 0)
end
def increment_version
@version = redis.incr("#{name}:version")
end
def key
if version.to_i > 0
"#{name}:#{version}"
else
name
end
end
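# Illustration (assumed state): an experiment named "link_color" that has
# been reset twice has version == 2, so key == "link_color:2"; at version 0
# it is just "link_color".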
def goals_key
"#{name}:goals"
end
def finished_key
self.class.finished_key(key)
end
def metadata_key
"#{name}:metadata"
end
def resettable?
resettable
end
def reset
Split.configuration.on_before_experiment_reset.call(self)
Split::Cache.clear_key(@name)
alternatives.each(&:reset)
reset_winner
Split.configuration.on_experiment_reset.call(self)
increment_version
end
def delete
Split.configuration.on_before_experiment_delete.call(self)
if Split.configuration.start_manually
redis.hdel(:experiment_start_times, @name)
end
reset_winner
redis.srem(:experiments, name)
remove_experiment_cohorting
remove_experiment_configuration
Split.configuration.on_experiment_delete.call(self)
increment_version
end
def delete_metadata
redis.del(metadata_key)
end
def load_from_redis
exp_config = redis.hgetall(experiment_config_key)
options = {
resettable: exp_config["resettable"],
algorithm: exp_config["algorithm"],
alternatives: load_alternatives_from_redis,
goals: Split::GoalsCollection.new(@name).load_from_redis,
metadata: load_metadata_from_redis
}
set_alternatives_and_options(options)
end
def calc_winning_alternatives
# Cache the winning alternatives so we recalculate them once per the specified interval.
intervals_since_epoch =
Time.now.utc.to_i / Split.configuration.winning_alternative_recalculation_interval
if self.calc_time != intervals_since_epoch
if goals.empty?
self.estimate_winning_alternative
else
goals.each do |goal|
self.estimate_winning_alternative(goal)
end
end
self.calc_time = intervals_since_epoch
self.save
end
end
def estimate_winning_alternative(goal = nil)
# initialize a hash of beta distributions based on the alternatives' conversion rates
beta_params = calc_beta_params(goal)
winning_alternatives = []
Split.configuration.beta_probability_simulations.times do
# calculate simulated conversion rates from the beta distributions
simulated_cr_hash = calc_simulated_conversion_rates(beta_params)
winning_alternative = find_simulated_winner(simulated_cr_hash)
# push the winning pair to the winning_alternatives array
winning_alternatives.push(winning_alternative)
end
winning_counts = count_simulated_wins(winning_alternatives)
@alternative_probabilities = calc_alternative_probabilities(winning_counts, Split.configuration.beta_probability_simulations)
write_to_alternatives(goal)
self.save
end
def write_to_alternatives(goal = nil)
alternatives.each do |alternative|
alternative.set_p_winner(@alternative_probabilities[alternative], goal)
end
end
def calc_alternative_probabilities(winning_counts, number_of_simulations)
alternative_probabilities = {}
winning_counts.each do |alternative, wins|
alternative_probabilities[alternative] = wins / number_of_simulations.to_f
end
alternative_probabilities
end
def count_simulated_wins(winning_alternatives)
# initialize a hash to keep track of winning alternative in simulations
winning_counts = {}
alternatives.each do |alternative|
winning_counts[alternative] = 0
end
# count number of times each alternative won, calculate probabilities, place in hash
winning_alternatives.each do |alternative|
winning_counts[alternative] += 1
end
winning_counts
end
def find_simulated_winner(simulated_cr_hash)
# figure out which alternative had the highest simulated conversion rate
winning_pair = ["", 0.0]
simulated_cr_hash.each do |alternative, rate|
if rate > winning_pair[1]
winning_pair = [alternative, rate]
end
end
winner = winning_pair[0]
winner
end
def calc_simulated_conversion_rates(beta_params)
simulated_cr_hash = {}
# create a hash which has the conversion rate pulled from each alternative's beta distribution
beta_params.each do |alternative, params|
alpha = params[0]
beta = params[1]
simulated_conversion_rate = Split::Algorithms.beta_distribution_rng(alpha, beta)
simulated_cr_hash[alternative] = simulated_conversion_rate
end
simulated_cr_hash
end
def calc_beta_params(goal = nil)
beta_params = {}
alternatives.each do |alternative|
conversions = goal.nil? ? alternative.completed_count : alternative.completed_count(goal)
alpha = 1 + conversions
beta = 1 + alternative.participant_count - conversions
params = [alpha, beta]
beta_params[alternative] = params
end
beta_params
end
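# Worked example (assumption): 10 conversions out of 100 participants give
# Beta(alpha = 11, beta = 91), i.e. a posterior mean conversion rate of
# 11 / (11 + 91) ≈ 0.108.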
def calc_time=(time)
redis.hset(experiment_config_key, :calc_time, time)
end
def calc_time
redis.hget(experiment_config_key, :calc_time).to_i
end
def jstring(goal = nil)
js_id = if goal.nil?
name
else
name + "-" + goal
end
js_id.gsub("/", "--")
end
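# For illustration: on an experiment named "nav/link_color",
#   jstring("purchase")  # => "nav--link_color-purchase"
# slashes become "--", presumably to keep the id DOM-safe.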
def cohorting_disabled?
@cohorting_disabled ||= begin
value = redis.hget(experiment_config_key, :cohorting)
value.nil? ? false : value.downcase == "true"
end
end
def disable_cohorting
@cohorting_disabled = true
redis.hset(experiment_config_key, :cohorting, true.to_s)
end
def enable_cohorting
@cohorting_disabled = false
redis.hset(experiment_config_key, :cohorting, false.to_s)
end
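# Illustrative round trip (assumption): disable_cohorting stores the string
# "true", so a freshly loaded experiment reports cohorting_disabled? == true.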
protected
def experiment_config_key
"experiment_configurations/#{@name}"
end
def load_metadata_from_configuration
Split.configuration.experiment_for(@name)[:metadata]
end
def load_metadata_from_redis
meta = redis.get(metadata_key)
JSON.parse(meta) unless meta.nil?
end
def load_alternatives_from_configuration
alts = Split.configuration.experiment_for(@name)[:alternatives]
raise ArgumentError, "Experiment configuration is missing :alternatives array" unless alts
if alts.is_a?(Hash)
alts.keys
else
alts.flatten
end
end
def load_alternatives_from_redis
alternatives = redis.lrange(@name, 0, -1)
alternatives.map do |alt|
alt = begin
JSON.parse(alt)
rescue
alt
end
Split::Alternative.new(alt, @name)
end
end
private
def redis
Split.redis
end
def redis_interface
RedisInterface.new
end
def persist_experiment_configuration
redis_interface.add_to_set(:experiments, name)
redis_interface.persist_list(name, @alternatives.map { |alt| { alt.name => alt.weight }.to_json })
goals_collection.save
if @metadata
redis.set(metadata_key, @metadata.to_json)
else
delete_metadata
end
end
def remove_experiment_configuration
@alternatives.each(&:delete)
goals_collection.delete
delete_metadata
redis.del(@name)
end
def experiment_configuration_has_changed?
existing_experiment = Experiment.find(@name)
existing_experiment.alternatives.map(&:to_s) != @alternatives.map(&:to_s) ||
existing_experiment.goals != @goals ||
existing_experiment.metadata != @metadata
end
def goals_collection
Split::GoalsCollection.new(@name, @goals)
end
def remove_experiment_cohorting
@cohorting_disabled = false
redis.hdel(experiment_config_key, :cohorting)
end
end
end
<MSG> Sort experiments on Dashboard so "active" ones without a winner appear first
<DFF> @@ -41,6 +41,11 @@ module Split
Split.redis.smembers(:experiments).map {|e| find(e)}
end
+ # Return experiments without a winner (considered "active") first
+ def self.all_active_first
+ all.sort_by{|e| e.winner ? 1 : 0} # sort_by hack since true/false isn't sortable
+ end
+
def self.find(name)
if Split.redis.exists(name)
obj = self.new name
| 5 | Sort experiments on Dashboard so "active" ones without a winner appear first | 0 | .rb | rb | mit | splitrb/split |
10071122 | <NME> configuration.rb
<BEF> # frozen_string_literal: true
module Split
class Configuration
attr_accessor :ignore_ip_addresses
attr_accessor :ignore_filter
attr_accessor :db_failover
attr_accessor :db_failover_on_db_error
attr_accessor :db_failover_allow_parameter_override
attr_accessor :allow_multiple_experiments
attr_accessor :enabled
attr_accessor :persistence
attr_accessor :persistence_cookie_length
attr_accessor :persistence_cookie_domain
attr_accessor :algorithm
attr_accessor :store_override
attr_accessor :start_manually
attr_accessor :reset_manually
attr_accessor :on_trial
attr_accessor :on_trial_choose
attr_accessor :on_trial_complete
attr_accessor :on_experiment_reset
attr_accessor :on_experiment_delete
attr_accessor :on_before_experiment_reset
attr_accessor :on_experiment_winner_choose
attr_accessor :on_before_experiment_delete
attr_accessor :include_rails_helper
attr_accessor :beta_probability_simulations
attr_accessor :winning_alternative_recalculation_interval
attr_accessor :redis
attr_accessor :dashboard_pagination_default_per_page
attr_accessor :cache
attr_reader :experiments
attr_writer :bots
attr_writer :robot_regex
def bots
@bots ||= {
# Indexers
"AdsBot-Google" => "Google Adwords",
"Baidu" => "Chinese search engine",
"Baiduspider" => "Chinese search engine",
"bingbot" => "Microsoft bing bot",
"Butterfly" => "Topsy Labs",
"Gigabot" => "Gigabot spider",
"Googlebot" => "Google spider",
"MJ12bot" => "Majestic-12 spider",
"msnbot" => "Microsoft bot",
"rogerbot" => "SeoMoz spider",
"PaperLiBot" => "PaperLi is another content curation service",
"Slurp" => "Yahoo spider",
"Sogou" => "Chinese search engine",
"spider" => "generic web spider",
"UnwindFetchor" => "Gnip crawler",
"WordPress" => "WordPress spider",
"YandexAccessibilityBot" => "Yandex accessibility spider",
"YandexBot" => "Yandex spider",
"YandexMobileBot" => "Yandex mobile spider",
"ZIBB" => "ZIBB spider",
# HTTP libraries
"Apache-HttpClient" => "Java http library",
"AppEngine-Google" => "Google App Engine",
"curl" => "curl unix CLI http client",
"ColdFusion" => "ColdFusion http library",
"EventMachine HttpClient" => "Ruby http library",
"Go http package" => "Go http library",
"Go-http-client" => "Go http library",
"Java" => "Generic Java http library",
"libwww-perl" => "Perl client-server library loved by script kids",
"lwp-trivial" => "Another Perl library loved by script kids",
"Python-urllib" => "Python http library",
"PycURL" => "Python http library",
"Test Certificate Info" => "C http library?",
"Typhoeus" => "Ruby http library",
"Wget" => "wget unix CLI http client",
# URL expanders / previewers
"awe.sm" => "Awe.sm URL expander",
"bitlybot" => "bit.ly bot",
"[email protected]" => "Linkfluence bot",
"facebookexternalhit" => "facebook bot",
"Facebot" => "Facebook crawler",
"Feedfetcher-Google" => "Google Feedfetcher",
"https://developers.google.com/+/web/snippet" => "Google+ Snippet Fetcher",
"LinkedInBot" => "LinkedIn bot",
"LongURL" => "URL expander service",
"NING" => "NING - Yet Another Twitter Swarmer",
"Pinterestbot" => "Pinterest Bot",
"redditbot" => "Reddit Bot",
"ShortLinkTranslate" => "Link shortener",
"Slackbot" => "Slackbot link expander",
"TweetmemeBot" => "TweetMeMe Crawler",
"Twitterbot" => "Twitter URL expander",
"UnwindFetch" => "Gnip URL expander",
"vkShare" => "VKontake Sharer",
# Uptime monitoring
"check_http" => "Nagios monitor",
"GoogleStackdriverMonitoring" => "Google Cloud monitor",
"NewRelicPinger" => "NewRelic monitor",
"Panopta" => "Monitoring service",
"Pingdom" => "Pingdom monitoring",
"SiteUptime" => "Site monitoring services",
"UptimeRobot" => "Monitoring service",
# ???
"DigitalPersona Fingerprint Software" => "HP Fingerprint scanner",
"ShowyouBot" => "Showyou iOS app spider",
"ZyBorg" => "Zyborg? Hmmm....",
"ELB-HealthChecker" => "ELB Health Check"
}
end
def experiments=(experiments)
raise InvalidExperimentsFormatError.new("Experiments must be a Hash") unless experiments.respond_to?(:keys)
@experiments = experiments
end
def disabled?
!enabled
end
def experiment_for(name)
if normalized_experiments
# TODO symbols
normalized_experiments[name.to_sym]
end
end
def metrics
return @metrics if defined?(@metrics)
@metrics = {}
if self.experiments
self.experiments.each do |key, value|
metrics = value_for(value, :metric) rescue nil
Array(metrics).each do |metric_name|
if metric_name
@metrics[metric_name.to_sym] ||= []
@metrics[metric_name.to_sym] << Split::Experiment.new(key)
end
end
end
end
@metrics
end
def normalized_experiments
if @experiments.nil?
nil
else
experiment_config = {}
@experiments.keys.each do |name|
experiment_config[name.to_sym] = {}
end
@experiments.each do |experiment_name, settings|
if alternatives = value_for(settings, :alternatives)
experiment_config[experiment_name.to_sym][:alternatives] = normalize_alternatives(alternatives)
end
if goals = value_for(settings, :goals)
experiment_config[experiment_name.to_sym][:goals] = goals
end
if metadata = value_for(settings, :metadata)
experiment_config[experiment_name.to_sym][:metadata] = metadata
end
if algorithm = value_for(settings, :algorithm)
experiment_config[experiment_name.to_sym][:algorithm] = algorithm
end
if (resettable = value_for(settings, :resettable)) != nil
experiment_config[experiment_name.to_sym][:resettable] = resettable
end
end
experiment_config
end
end
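# Illustrative check of the behaviour preserved by the refactor in the diff
# below (assumed input):
#   normalized_experiments  # with @experiments = { my_exp: { alternatives: ["a", "b"], resettable: false } }
#   # => { my_exp: { alternatives: ["a", ["b"]], resettable: false } }
# resettable: false survives because only nil values are skipped.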
def normalize_alternatives(alternatives)
given_probability, num_with_probability = alternatives.inject([0, 0]) do |a, v|
p, n = a
if percent = value_for(v, :percent)
[p + percent, n + 1]
else
a
end
end
num_without_probability = alternatives.length - num_with_probability
unassigned_probability = ((100.0 - given_probability) / num_without_probability / 100.0)
if num_with_probability.nonzero?
alternatives = alternatives.map do |v|
if (name = value_for(v, :name)) && (percent = value_for(v, :percent))
{ name => percent / 100.0 }
elsif name = value_for(v, :name)
{ name => unassigned_probability }
else
{ v => unassigned_probability }
end
end
[alternatives.shift, alternatives]
else
alternatives = alternatives.dup
[alternatives.shift, alternatives]
end
end
def robot_regex
@robot_regex ||= /\b(?:#{escaped_bots.join('|')})\b|\A\W*\z/i
end
def initialize
@ignore_ip_addresses = []
@ignore_filter = proc { |request| is_robot? || is_ignored_ip_address? }
@db_failover = false
@db_failover_on_db_error = proc { |error| } # e.g. use Rails logger here
@on_experiment_reset = proc { |experiment| }
@on_experiment_delete = proc { |experiment| }
@on_before_experiment_reset = proc { |experiment| }
@on_before_experiment_delete = proc { |experiment| }
@on_experiment_winner_choose = proc { |experiment| }
@db_failover_allow_parameter_override = false
@allow_multiple_experiments = false
@enabled = true
@experiments = {}
@persistence = Split::Persistence::SessionAdapter
@persistence_cookie_length = 31536000 # One year from now
@persistence_cookie_domain = nil
@algorithm = Split::Algorithms::WeightedSample
@include_rails_helper = true
@beta_probability_simulations = 10000
@winning_alternative_recalculation_interval = 60 * 60 * 24 # 1 day
@redis = ENV.fetch(ENV.fetch("REDIS_PROVIDER", "REDIS_URL"), "redis://localhost:6379")
@dashboard_pagination_default_per_page = 10
end
private
def value_for(hash, key)
if hash.kind_of?(Hash)
hash.has_key?(key.to_s) ? hash[key.to_s] : hash[key.to_sym]
end
end
def escaped_bots
bots.map { |key, _| Regexp.escape(key) }
end
end
end
<MSG> Simplify Configuration#normalized_experiments
- Convert `if condition nil...else` clause to `return nil if condition`
- Store experiment_data in a hash and then iterate over all values and
assign them to experiment_config only when set
**Assumption**: I assume that settings[key], where key is one of (:goals,
:alternatives, :metadata, :algorithm), will never return false.
settings[:resettable] will return false if set to false.
<DFF> @@ -130,38 +130,32 @@ module Split
end
def normalized_experiments
- if @experiments.nil?
- nil
- else
- experiment_config = {}
- @experiments.keys.each do |name|
- experiment_config[name.to_sym] = {}
- end
-
- @experiments.each do |experiment_name, settings|
- if alternatives = value_for(settings, :alternatives)
- experiment_config[experiment_name.to_sym][:alternatives] = normalize_alternatives(alternatives)
- end
+ return nil if @experiments.nil?
- if goals = value_for(settings, :goals)
- experiment_config[experiment_name.to_sym][:goals] = goals
- end
-
- if metadata = value_for(settings, :metadata)
- experiment_config[experiment_name.to_sym][:metadata] = metadata
- end
-
- if algorithm = value_for(settings, :algorithm)
- experiment_config[experiment_name.to_sym][:algorithm] = algorithm
- end
+ experiment_config = {}
+ @experiments.keys.each do |name|
+ experiment_config[name.to_sym] = {}
+ end
- if (resettable = value_for(settings, :resettable)) != nil
- experiment_config[experiment_name.to_sym][:resettable] = resettable
- end
+ @experiments.each do |experiment_name, settings|
+ alternatives = if (alts = value_for(settings, :alternatives))
+ normalize_alternatives(alts)
+ end
+
+ experiment_data = {
+ alternatives: alternatives,
+ goals: value_for(settings, :goals),
+ metadata: value_for(settings, :metadata),
+ algorithm: value_for(settings, :algorithm),
+ resettable: value_for(settings, :resettable)
+ }
+
+ experiment_data.each do |name, value|
+ experiment_config[experiment_name.to_sym][name] = value if value != nil
end
-
- experiment_config
end
+
+ experiment_config
end
def normalize_alternatives(alternatives)
| 22 | Simplify Configuration#normalized_experiments | 28 | .rb | rb | mit | splitrb/split |
10071123 | <NME> configuration.rb
<BEF> # frozen_string_literal: true
module Split
class Configuration
attr_accessor :ignore_ip_addresses
attr_accessor :ignore_filter
attr_accessor :db_failover
attr_accessor :db_failover_on_db_error
attr_accessor :db_failover_allow_parameter_override
attr_accessor :allow_multiple_experiments
attr_accessor :enabled
attr_accessor :persistence
attr_accessor :persistence_cookie_length
attr_accessor :persistence_cookie_domain
attr_accessor :algorithm
attr_accessor :store_override
attr_accessor :start_manually
attr_accessor :reset_manually
attr_accessor :on_trial
attr_accessor :on_trial_choose
attr_accessor :on_trial_complete
attr_accessor :on_experiment_reset
attr_accessor :on_experiment_delete
attr_accessor :on_before_experiment_reset
attr_accessor :on_experiment_winner_choose
attr_accessor :on_before_experiment_delete
attr_accessor :include_rails_helper
attr_accessor :beta_probability_simulations
attr_accessor :winning_alternative_recalculation_interval
attr_accessor :redis
attr_accessor :dashboard_pagination_default_per_page
attr_accessor :cache
attr_reader :experiments
attr_writer :bots
attr_writer :robot_regex
def bots
@bots ||= {
# Indexers
"AdsBot-Google" => "Google Adwords",
"Baidu" => "Chinese search engine",
"Baiduspider" => "Chinese search engine",
"bingbot" => "Microsoft bing bot",
"Butterfly" => "Topsy Labs",
"Gigabot" => "Gigabot spider",
"Googlebot" => "Google spider",
"MJ12bot" => "Majestic-12 spider",
"msnbot" => "Microsoft bot",
"rogerbot" => "SeoMoz spider",
"PaperLiBot" => "PaperLi is another content curation service",
"Slurp" => "Yahoo spider",
"Sogou" => "Chinese search engine",
"spider" => "generic web spider",
"UnwindFetchor" => "Gnip crawler",
"WordPress" => "WordPress spider",
"YandexAccessibilityBot" => "Yandex accessibility spider",
"YandexBot" => "Yandex spider",
"YandexMobileBot" => "Yandex mobile spider",
"ZIBB" => "ZIBB spider",
# HTTP libraries
"Apache-HttpClient" => "Java http library",
"AppEngine-Google" => "Google App Engine",
"curl" => "curl unix CLI http client",
"ColdFusion" => "ColdFusion http library",
"EventMachine HttpClient" => "Ruby http library",
"Go http package" => "Go http library",
"Go-http-client" => "Go http library",
"Java" => "Generic Java http library",
"libwww-perl" => "Perl client-server library loved by script kids",
"lwp-trivial" => "Another Perl library loved by script kids",
"Python-urllib" => "Python http library",
"PycURL" => "Python http library",
"Test Certificate Info" => "C http library?",
"Typhoeus" => "Ruby http library",
"Wget" => "wget unix CLI http client",
# URL expanders / previewers
"awe.sm" => "Awe.sm URL expander",
"bitlybot" => "bit.ly bot",
"[email protected]" => "Linkfluence bot",
"facebookexternalhit" => "facebook bot",
"Facebot" => "Facebook crawler",
"Feedfetcher-Google" => "Google Feedfetcher",
"https://developers.google.com/+/web/snippet" => "Google+ Snippet Fetcher",
"LinkedInBot" => "LinkedIn bot",
"LongURL" => "URL expander service",
"NING" => "NING - Yet Another Twitter Swarmer",
"Pinterestbot" => "Pinterest Bot",
"redditbot" => "Reddit Bot",
"ShortLinkTranslate" => "Link shortener",
"Slackbot" => "Slackbot link expander",
"TweetmemeBot" => "TweetMeMe Crawler",
"Twitterbot" => "Twitter URL expander",
"UnwindFetch" => "Gnip URL expander",
"vkShare" => "VKontake Sharer",
# Uptime monitoring
"check_http" => "Nagios monitor",
"GoogleStackdriverMonitoring" => "Google Cloud monitor",
"NewRelicPinger" => "NewRelic monitor",
"Panopta" => "Monitoring service",
"Pingdom" => "Pingdom monitoring",
"SiteUptime" => "Site monitoring services",
"UptimeRobot" => "Monitoring service",
# ???
"DigitalPersona Fingerprint Software" => "HP Fingerprint scanner",
"ShowyouBot" => "Showyou iOS app spider",
"ZyBorg" => "Zyborg? Hmmm....",
"ELB-HealthChecker" => "ELB Health Check"
}
end
def experiments=(experiments)
raise InvalidExperimentsFormatError.new("Experiments must be a Hash") unless experiments.respond_to?(:keys)
@experiments = experiments
end
def disabled?
!enabled
end
def experiment_for(name)
if normalized_experiments
# TODO symbols
normalized_experiments[name.to_sym]
end
end
def normalized_experiments
if @experiments.nil?
nil
else
experiment_config = {}
@experiments.keys.each do |name|
experiment_config[name.to_sym] = {}
end
@experiments.each do |experiment_name, settings|
if alternatives = value_for(settings, :alternatives)
experiment_config[experiment_name.to_sym][:alternatives] = normalize_alternatives(alternatives)
end
if goals = value_for(settings, :goals)
experiment_config[experiment_name.to_sym][:goals] = goals
end
if metadata = value_for(settings, :metadata)
experiment_config[experiment_name.to_sym][:metadata] = metadata
end
if algorithm = value_for(settings, :algorithm)
experiment_config[experiment_name.to_sym][:algorithm] = algorithm
end
if (resettable = value_for(settings, :resettable)) != nil
experiment_config[experiment_name.to_sym][:resettable] = resettable
end
end
experiment_config
end
end
def normalize_alternatives(alternatives)
given_probability, num_with_probability = alternatives.inject([0, 0]) do |a, v|
p, n = a
if percent = value_for(v, :percent)
[p + percent, n + 1]
else
a
end
end
num_without_probability = alternatives.length - num_with_probability
unassigned_probability = ((100.0 - given_probability) / num_without_probability / 100.0)
if num_with_probability.nonzero?
alternatives = alternatives.map do |v|
if (name = value_for(v, :name)) && (percent = value_for(v, :percent))
{ name => percent / 100.0 }
elsif name = value_for(v, :name)
{ name => unassigned_probability }
else
{ v => unassigned_probability }
end
end
[alternatives.shift, alternatives]
else
alternatives = alternatives.dup
[alternatives.shift, alternatives]
end
end
def robot_regex
@robot_regex ||= /\b(?:#{escaped_bots.join('|')})\b|\A\W*\z/i
end
def initialize
@ignore_ip_addresses = []
@ignore_filter = proc { |request| is_robot? || is_ignored_ip_address? }
@db_failover = false
@db_failover_on_db_error = proc { |error| } # e.g. use Rails logger here
@on_experiment_reset = proc { |experiment| }
@on_experiment_delete = proc { |experiment| }
@on_before_experiment_reset = proc { |experiment| }
@on_before_experiment_delete = proc { |experiment| }
@on_experiment_winner_choose = proc { |experiment| }
@db_failover_allow_parameter_override = false
@allow_multiple_experiments = false
@enabled = true
@experiments = {}
@persistence = Split::Persistence::SessionAdapter
@persistence_cookie_length = 31536000 # One year from now
@persistence_cookie_domain = nil
@algorithm = Split::Algorithms::WeightedSample
@include_rails_helper = true
@beta_probability_simulations = 10000
@winning_alternative_recalculation_interval = 60 * 60 * 24 # 1 day
@redis = ENV.fetch(ENV.fetch("REDIS_PROVIDER", "REDIS_URL"), "redis://localhost:6379")
@dashboard_pagination_default_per_page = 10
end
private
def value_for(hash, key)
if hash.kind_of?(Hash)
hash.has_key?(key.to_s) ? hash[key.to_s] : hash[key.to_sym]
end
end
def escaped_bots
bots.map { |key, _| Regexp.escape(key) }
end
end
end
<MSG> Simplify Configuration#normalized_experiments
- Convert `if condition nil...else` clause to `return nil if condition`
- Store experiment_data in a hash and then iterate over all values and
assign them to experiment_config only when set
**Assumption**: I assume that settings[key] where key in (:goals,
:alternatives, :metadata, :algorithm) will never return false.
settings[:resettable], will return false if set to false.
<DFF> @@ -130,38 +130,32 @@ module Split
end
def normalized_experiments
- if @experiments.nil?
- nil
- else
- experiment_config = {}
- @experiments.keys.each do |name|
- experiment_config[name.to_sym] = {}
- end
-
- @experiments.each do |experiment_name, settings|
- if alternatives = value_for(settings, :alternatives)
- experiment_config[experiment_name.to_sym][:alternatives] = normalize_alternatives(alternatives)
- end
+ return nil if @experiments.nil?
- if goals = value_for(settings, :goals)
- experiment_config[experiment_name.to_sym][:goals] = goals
- end
-
- if metadata = value_for(settings, :metadata)
- experiment_config[experiment_name.to_sym][:metadata] = metadata
- end
-
- if algorithm = value_for(settings, :algorithm)
- experiment_config[experiment_name.to_sym][:algorithm] = algorithm
- end
+ experiment_config = {}
+ @experiments.keys.each do |name|
+ experiment_config[name.to_sym] = {}
+ end
- if (resettable = value_for(settings, :resettable)) != nil
- experiment_config[experiment_name.to_sym][:resettable] = resettable
- end
+ @experiments.each do |experiment_name, settings|
+ alternatives = if (alts = value_for(settings, :alternatives))
+ normalize_alternatives(alts)
+ end
+
+ experiment_data = {
+ alternatives: alternatives,
+ goals: value_for(settings, :goals),
+ metadata: value_for(settings, :metadata),
+ algorithm: value_for(settings, :algorithm),
+ resettable: value_for(settings, :resettable)
+ }
+
+ experiment_data.each do |name, value|
+ experiment_config[experiment_name.to_sym][name] = value if value != nil
end
-
- experiment_config
end
+
+ experiment_config
end
def normalize_alternatives(alternatives)
| 22 | Simplify Configuration#normalized_experiments | 28 | .rb | rb | mit | splitrb/split |
10071125 | <NME> README.md
<BEF> # [Split](https://libraries.io/rubygems/split)
[](http://badge.fury.io/rb/split)

[](https://codeclimate.com/github/splitrb/split)
[](https://codeclimate.com/github/splitrb/split/coverage)
[](https://github.com/RichardLitt/standard-readme)
[](https://www.codetriage.com/splitrb/split)
> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split
Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.
Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.
Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.
## Install
### Requirements
Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.
If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3).
Split uses Redis as a datastore.
Split only supports Redis 4.0 or greater.
If you're on OS X, Homebrew is the simplest way to install Redis:
```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```
You now have a Redis daemon running on port `6379`.
### Setup
```bash
gem install split
```
#### Rails
Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.
#### Sinatra
To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:
```ruby
require 'split'
class MySinatraApp < Sinatra::Base
enable :sessions
helpers Split::Helper
get '/' do
...
end
```
## Usage
To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.
Example: View
```erb
<% ab_test("login_button", "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
<%= image_tag(button_file, :alt => "Login!") %>
<% end %>
```
Example: Controller

```ruby
def register_new_user
  # See what level of free points maximizes users' decision to buy replacement points.
  @starter_points = ab_test("new_user_free_points", '100', '200', '300')
end
```

Example: Conversion tracking (in a controller!)

```ruby
def buy_new_points
  # some business logic
  finished("new_user_free_points")
end
```

Example: Conversion tracking (in a view)

```erb
Thanks for signing up, dude! <% finished("signup_page_redesign") %>
```

You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
## Statistical Validity
Split has two options for you to use to determine which alternative is the best.
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.
As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).
[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.
The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.
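For intuition, the z-test comparison from the first option can be sketched in a few lines of Ruby. This is illustrative only, not Split's internal code, and the sample numbers are made up:

```ruby
# Rough z-test sketch: control vs. one alternative.
n1, c1 = 1000, 100 # control: participants, conversions
n2, c2 = 1000, 130 # alternative: participants, conversions
p1, p2 = c1.fdiv(n1), c2.fdiv(n2)
p_pooled = (c1 + c2).fdiv(n1 + n2)
z = (p2 - p1) / Math.sqrt(p_pooled * (1 - p_pooled) * (1.0 / n1 + 1.0 / n2))
puts z.round(2) # |z| >= 1.96 roughly corresponds to 95% confidence
```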
Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.

To do this you can pass a weight with each alternative in the following ways:

```ruby
ab_test('homepage design', {'Old' => 18}, {'New' => 2})
ab_test('homepage design', 'Old', {'New' => 1.0/9})
ab_test('homepage design', {'Old' => 9}, 'New')
```

This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.
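For clarity, this is how relative weights translate into assignment probabilities — a sketch of the idea, not Split's internals:

```ruby
# Weights are relative: {Old => 9, New => 1} means 9 in 10 vs. 1 in 10.
weights = { "Old" => 9, "New" => 1 }
total = weights.values.sum.to_f
probabilities = weights.transform_values { |w| w / total }
# => {"Old"=>0.9, "New"=>0.1}
```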
### Overriding alternatives
For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the url.
If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:
http://myawesomesite.com?ab_test[button_color]=red
will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.
In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.
http://myawesomesite.com?SPLIT_DISABLE=true
### Rspec Helper

To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:
```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
# Force a specific experiment alternative to always be returned:
# use_ab_test(signup_form: "single_page")
#
# Force alternatives for multiple experiments:
# use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
#
def use_ab_test(alternatives_by_experiment)
allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
block.call(variant) unless block.nil?
variant
end
end
end
# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
config.include SplitHelper
end
```
Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:
```ruby
it "registers using experimental signup" do
use_ab_test experiment_name: "alternative_name"
post "/signups"
...
end
```
### Reset after completion
When a user completes a test their session is reset so that they may start the test again in the future.
To stop this behaviour you can pass the following option to the `ab_finished` method:
```ruby
ab_finished(:experiment_name, reset: false)
```
The user will then always see the alternative they started with.
Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.
### Reset experiments manually
By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.
You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.
### Multiple experiments at once
By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.
To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = true
end
```
This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.
To address this, setting the `allow_multiple_experiments` config option to 'control' like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = 'control'
end
```
For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment.
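A sketch of the expected behaviour under the 'control' setting (illustrative; the experiment names here are hypothetical):

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = 'control'
end

# Both hypothetical experiments define a 'control' alternative.
first = ab_test(:exp_one, 'control', 'treatment')
# If `first` is 'control', the user may still enroll in :exp_two.
# If `first` is 'treatment', the call below returns 'control' (the first
# alternative) without enrolling the user in the experiment.
second = ab_test(:exp_two, 'control', 'blue')
```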
### Experiment Persistence
Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.
By default Split will store the tests for each user in the session.
You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.
#### Cookies
```ruby
Split.configure do |config|
config.persistence = :cookie
end
```
When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).
```ruby
Split.configure do |config|
config.persistence = :cookie
config.persistence_cookie_length = 2592000 # 30 days
end
```

#### Redis
Using Redis will allow ab_users to persist across sessions or machines.
```ruby
Split.configure do |config|
config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
# Equivalent
# config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```
Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
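Putting both options together — a sketch, assuming a `current_user_id` method is available on your controller context:

```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(
    lookup_by: :current_user_id,   # mandatory
    namespace: "split_persistence" # optional, defaults to "persistence"
  )
end
```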
#### Dual Adapter

The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.

```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
    lookup_by: -> (context) { context.send(:current_user).try(:id) },
    expire_seconds: 2592000)

Split.configure do |config|
  config.persistence = Split::Persistence::DualAdapter.with_config(
      logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
      logged_in_adapter: redis_adapter,
      logged_out_adapter: cookie_adapter)
  config.persistence_cookie_length = 2592000 # 30 days
end
```
### Trial Event Hooks
You can define methods that will be called at the same time as experiment
alternative participation and goal completion.
For example:
``` ruby
Split.configure do |config|
  config.on_trial          = :log_trial # run on every trial
  config.on_trial_choose   = :log_trial_choose # run on trials with new users only
  config.on_trial_complete = :log_trial_complete
end
```
Set these attributes to a method name available in the same context as the
`ab_test` method. These methods should accept one argument, a `Trial` instance.
``` ruby
def log_trial(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_choose(trial)
logger.info "[new user] experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_complete(trial)
logger.info "experiment=%s alternative=%s user=%s complete=true" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
#### Views

If you are running `ab_test` from a view, you must define your event
hook callback as a
[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)
in the controller:

``` ruby
helper_method :log_trial_choose

def log_trial_choose(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

### Experiment configuration

Instead of providing the experiment options inline, you can store them
in a hash. This hash can control your experiment's alternatives, weights,
algorithm and if the experiment resets once finished:
```ruby
Split.configure do |config|
config.experiments = {
"my_first_experiment" => {
:alternatives => ["a", "b"],
:resettable => false
},
"my_second_experiment" => {
:algorithm => 'Split::Algorithms::Whiplash',
:alternatives => [
{ :name => "a", :percent => 67 },
{ :name => "b", :percent => 33 }
]
}
}
end
```

### Experiment hooks

You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.

For example:

``` ruby
Split.configure do |config|
  # after experiment reset or deleted
  config.on_experiment_reset  = -> (example) { # Do something on reset }
  config.on_experiment_delete = -> (experiment) { # Do something else on delete }
  # before experiment reset or deleted
  config.on_before_experiment_reset  = -> (example) { # Do something on reset }
  config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }
  # after experiment winner had been set
  config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }
end
```
## Web Interface
Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.
If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`
```ruby
require 'split/dashboard'
run Rack::URLMap.new \
"/" => Your::App.new,
"/split" => Split::Dashboard.new
```
However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:
```ruby
gem 'split', require: 'split/dashboard'
```

Then adding this to config/routes.rb

```ruby
mount Split::Dashboard, at: 'split'
```

You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)

```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:
```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
request.env['warden'].authenticated? # are we authenticated?
request.env['warden'].authenticate! # authenticate if not already
# or even check any other condition such as request.env['warden'].user.is_admin?
end
```
More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)
### Screenshot

## Configuration
You can override the default configuration options of Split like so:
```ruby
Split.configure do |config|
  config.db_failover = true # handle redis errors gracefully
  config.db_failover_on_db_error = proc{|error| Rails.logger.error(error.message) }
  config.allow_multiple_experiments = true
  config.enabled = true
  config.persistence = Split::Persistence::SessionAdapter
  config.redis = "redis://custom.redis.url:6380"
end
```
Split looks for the Redis host in the environment variable `REDIS_URL` then
defaults to `redis://localhost:6379` if not specified by configure block.
On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to
determine which env variable key to use when retrieving the host config. This
defaults to `REDIS_URL`.
### Filtering
In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.
Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.
```ruby
Split.configure do |config|
  # bot config
  config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.bots"
  config.bots.delete('Pingdom') # remove bot from list
  # ip config
  config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/
  # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? }
  config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
You can also store your experiments in a YAML file:
```ruby
Split.configure do |config|
config.experiments = YAML.load_file "config/experiments.yml"
end
```
You can then define the YAML file like:
```yaml
my_first_experiment:
alternatives:
- a
- b
my_second_experiment:
alternatives:
- name: a
percent: 67
- name: b
percent: 33
resettable: false
```
This simplifies the calls from your code:
```ruby
ab_test(:my_first_experiment)
```
and:
```ruby
ab_finished(:my_first_experiment)
```
You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metadata: {
"a" => {"text" => "Have a fantastic day"},
"b" => {"text" => "Don't get hit by a bus"}
}
}
}
end
```
```yaml
my_first_experiment:
alternatives:
- a
- b
metadata:
a:
text: "Have a fantastic day"
b:
text: "Don't get hit by a bus"
```
This allows for some advanced experiment configuration using methods like:
```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```
or in views:
```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
<%= alternative %>
<small><%= meta['text'] %></small>
<% end %>
```
The keys used in meta data should be Strings
#### Metrics
You might wish to track generic metrics, such as conversions, and use
those to complete multiple different experiments without adding more to
your code. You can use the configuration hash to do this, thanks to
the `:metric` option.
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metric: :my_metric
}
}
end
```
Your code may then track a completion using the metric instead of
the experiment name:
```ruby
ab_finished(:my_metric)
```
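For example, two hypothetical experiments can share one metric, so a single `ab_finished(:checkout)` completes both for the current user (the experiment names below are illustrative):

```ruby
Split.configure do |config|
  config.experiments = {
    checkout_button_color: { alternatives: ["red", "green"], metric: :checkout },
    checkout_copy:         { alternatives: ["Buy now", "Order"], metric: :checkout }
  }
end

# Later, in a controller:
ab_finished(:checkout)
```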
You can also create a new metric by instantiating and saving a new Metric object.
```ruby
Split::Metric.new(:my_metric)
Split::Metric.save
```
#### Goals
You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:
```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```
or you can define them in a configuration file:
```ruby
Split.configure do |config|
config.experiments = {
link_color: {
alternatives: ["red", "blue"],
goals: ["purchase", "refund"]
}
}
end
```
To complete a goal conversion, you do it like:
```ruby
ab_finished(link_color: "purchase")
```
Note that if you pass additional options, that should be a separate hash:
```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```
**NOTE:** This does not mean that a single experiment can complete more than one goal.
Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)
**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").
**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.
**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.
#### Combined Experiments
If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.
Configure like so:
```ruby
Split.configuration.experiments = {
:button_color_experiment => {
:alternatives => ["blue", "green"],
:combined_experiments => ["button_color_on_signup", "button_color_on_login"]
}
}
```
Starting the combined test starts all combined experiments
```ruby
ab_combined_test(:button_color_experiment)
```
Finish each combined test as normal
```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```
**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`
* In Sinatra include the CombinedExperimentsHelper
```
helpers Split::CombinedExperimentsHelper
```
### DB failover solution
Due to the fact that Redis has no automatic failover mechanism, it's
possible to switch on the `db_failover` config option, so that `ab_test`
and `ab_finished` will not crash in case of a db failure. `ab_test` always
delivers alternative A (the first one) in that case.
It's also possible to set a `db_failover_on_db_error` callback (proc)
for example to log these errors via Rails.logger.
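A minimal sketch of that configuration, assuming a Rails logger is available:

```ruby
Split.configure do |config|
  config.db_failover = true # ab_test returns the first alternative on Redis errors
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
end
```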
### Redis
You may want to change the Redis host and port Split connects to, or
set various other options at startup.
Split has a `redis` setter which can be given a string or a Redis
object. This means if you're already using Redis in your app, Split
can re-use the existing connection.
String: `Split.redis = 'redis://localhost:6379'`
Redis: `Split.redis = $redis`
For our rails app we have a `config/initializers/split.rb` file where
we load `config/split.yml` by hand and set the Redis information
appropriately.
Here's our `config/split.yml`:
```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```
And our initializer:
```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```
### Redis Caching (v4.0+)
In some high-volume usage scenarios, Redis load can be incurred by repeated
fetches for fairly static data. Enabling caching will reduce this load.
```ruby
Split.configuration.cache = true
```
This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`
## Namespaces
If you're running multiple, separate instances of Split you may want
to namespace the keyspaces so they do not overlap. This is not unlike
the approach taken by many memcached clients.
This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)
library. To configure Split to use `Redis::Namespace`, do the following:
1. Add `redis-namespace` to your Gemfile:
```ruby
gem 'redis-namespace'
```
2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an
initializer):
```ruby
redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
```
## Outside of a Web Session
Split provides the Helper module to facilitate running experiments inside web sessions.
Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to
conduct experiments that are not tied to a web session.
```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(:experiment => experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
trial.complete!
end
```
## Algorithms
By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.
It is possible to specify static weights to favor certain alternatives.
`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).
This algorithm will automatically weight the alternatives based on their relative performance,
choosing the better-performing ones more often as trials are completed.
`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal
participation across all alternatives. This algorithm will choose the alternative
with the fewest participants. In the event of multiple minimum participant alternatives
(i.e. starting a new "Block") the algorithm will choose a random alternative from
those minimum participant alternatives.
Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.
To change the algorithm globally for all experiments, use the following in your initializer:
```ruby
Split.configure do |config|
config.algorithm = Split::Algorithms::Whiplash
end
```
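To pick an algorithm for a single experiment instead, set `:algorithm` in the experiments hash — a sketch, with an illustrative experiment name:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      algorithm: 'Split::Algorithms::BlockRandomization'
    }
  }
end
```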
## Extensions
- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.
## Screencast
Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)
## Blogposts
* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)
## Backers
Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]
<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a>
## Sponsors
Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]
<a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a>
## Contribute
Please do! Over 70 people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.
### Development
The source code is hosted at [GitHub](https://github.com/splitrb/split).
Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).
You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).
### Tests
Run the tests like this:
```bash
# Start a Redis server in another tab.
redis-server

bundle
rake spec
```
### A Note on Patches and Pull Requests
* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a
future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history.
(If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.
### Code of Conduct
Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
## Copyright
[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
<MSG> Merge pull request #320 from grosser/grosser/symbols
modernize and symbols
<DFF> @@ -68,8 +68,8 @@ It can be used to render different templates, show different text or any other c
Example: View
```erb
-<% ab_test("login_button", "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
- <%= image_tag(button_file, :alt => "Login!") %>
+<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
+ <%= image_tag(button_file, alt: "Login!") %>
<% end %>
```
@@ -78,7 +78,7 @@ Example: Controller
```ruby
def register_new_user
# See what level of free points maximizes users' decision to buy replacement points.
- @starter_points = ab_test("new_user_free_points", '100', '200', '300')
+ @starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```
@@ -87,14 +87,14 @@ Example: Conversion tracking (in a controller!)
```ruby
def buy_new_points
# some business logic
- finished("new_user_free_points")
+ finished(:new_user_free_points)
end
```
Example: Conversion tracking (in a view)
```erb
-Thanks for signing up, dude! <% finished("signup_page_redesign") %>
+Thanks for signing up, dude! <% finished(:signup_page_redesign) %>
```
You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
@@ -121,11 +121,11 @@ Perhaps you only want to show an alternative to 10% of your visitors because it
To do this you can pass a weight with each alternative in the following ways:
```ruby
-ab_test('homepage design', {'Old' => 18}, {'New' => 2})
+ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
-ab_test('homepage design', 'Old', {'New' => 1.0/9})
+ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
-ab_test('homepage design', {'Old' => 9}, 'New')
+ab_test(:homepage_design, {'Old' => 9}, 'New')
```
This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.
@@ -161,7 +161,7 @@ When a user completes a test their session is reset so that they may start the t
To stop this behaviour you can pass the following option to the `finished` method:
```ruby
-finished('experiment_name', :reset => false)
+finished(:experiment_name, reset: false)
```
The user will then always see the alternative they started with.
@@ -202,9 +202,9 @@ Using Redis will allow ab_users to persist across sessions or machines.
```ruby
Split.configure do |config|
- config.persistence = Split::Persistence::RedisAdapter.with_config(:lookup_by => proc { |context| context.current_user_id })
+ config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
# Equivalent
- # config.persistence = Split::Persistence::RedisAdapter.with_config(:lookup_by => :current_user_id }
+ # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```
@@ -276,8 +276,8 @@ For example:
``` ruby
Split.configure do |config|
- config.on_experiment_reset = proc{ |experiment| # Do something on reset }
- config.on_experiment_delete = proc{ |experiment| # Do something else on delete }
+ config.on_experiment_reset = -> (experiment) { # Do something on reset }
+ config.on_experiment_delete = -> (experiment) { # Do something else on delete }
end
```
@@ -298,13 +298,13 @@ run Rack::URLMap.new \
However, if you are using Rails 3: You can mount this inside your app routes by first adding this to the Gemfile:
```ruby
-gem 'split', :require => 'split/dashboard'
+gem 'split', require: 'split/dashboard'
```
Then adding this to config/routes.rb
```ruby
-mount Split::Dashboard, :at => 'split'
+mount Split::Dashboard, at: 'split'
```
You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)
@@ -317,11 +317,11 @@ end
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:
```ruby
-match "/split" => Split::Dashboard, :anchor => false, :via => [:get, :post], :constraints => lambda { |request|
+match "/split" => Split::Dashboard, anchor: false, via: [:get, :post], constraints: -> (request) do
request.env['warden'].authenticated? # are we authenticated?
request.env['warden'].authenticate! # authenticate if not already
# or even check any other condition such as request.env['warden'].user.is_admin?
-}
+end
```
More information on this [here](http://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)
@@ -337,7 +337,7 @@ You can override the default configuration options of Split like so:
```ruby
Split.configure do |config|
config.db_failover = true # handle redis errors gracefully
- config.db_failover_on_db_error = proc{|error| Rails.logger.error(error.message) }
+ config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
config.allow_multiple_experiments = true
config.enabled = true
config.persistence = Split::Persistence::SessionAdapter
@@ -363,7 +363,7 @@ Split.configure do |config|
config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/
# or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? }
- config.ignore_filter = proc{ |request| CustomExcludeLogic.excludes?(request) }
+ config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
@@ -376,15 +376,15 @@ algorithm and if the experiment resets once finished:
```ruby
Split.configure do |config|
config.experiments = {
- "my_first_experiment" => {
- :alternatives => ["a", "b"],
- :resettable => false
+ my_first_experiment: {
+ alternatives: ["a", "b"],
+ resettable: false
},
- "my_second_experiment" => {
- :algorithm => 'Split::Algorithms::Whiplash',
- :alternatives => [
- { :name => "a", :percent => 67 },
- { :name => "b", :percent => 33 }
+ my_second_experiment: {
+ algorithm: 'Split::Algorithms::Whiplash',
+ alternatives: [
+ { name: "a", percent: 67 },
+ { name: "b", percent: 33 }
]
}
}
@@ -418,13 +418,13 @@ my_second_experiment:
This simplifies the calls from your code:
```ruby
-ab_test("my_first_experiment")
+ab_test(:my_first_experiment)
```
and:
```ruby
-finished("my_first_experiment")
+finished(:my_first_experiment)
```
You can also add meta data for each experiment, very useful when you need more than an alternative name to change behaviour:
@@ -468,9 +468,9 @@ the `:metric` option.
```ruby
Split.configure do |config|
config.experiments = {
- "my_first_experiment" => {
- :alternatives => ["a", "b"],
- :metric => :my_metric,
+ my_first_experiment: {
+ alternatives: ["a", "b"],
+ metric: :my_metric,
}
}
end
@@ -496,7 +496,7 @@ You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:
```ruby
-ab_test({"link_color" => ["purchase", "refund"]}, "red", "blue")
+ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```
or you can you can define them in a configuration file:
@@ -504,9 +504,9 @@ or you can you can define them in a configuration file:
```ruby
Split.configure do |config|
config.experiments = {
- "link_color" => {
- :alternatives => ["red", "blue"],
- :goals => ["purchase", "refund"]
+ link_color: {
+ alternatives: ["red", "blue"],
+ goals: ["purchase", "refund"]
}
}
end
@@ -515,7 +515,7 @@ end
To complete a goal conversion, you do it like:
```ruby
-finished("link_color" => "purchase")
+finished(link_color: "purchase")
```
**NOTE:** This does not mean that a single experiment can have/complete progressive goals.
| 37 | Merge pull request #320 from grosser/grosser/symbols | 37 | .md | md | mit | splitrb/split |
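The pattern throughout the diff above is the same two mechanical conversions: the older hash-rocket syntax (`:key => value`) becomes a Ruby 1.9 symbol-key literal (`key: value`), and `proc { }` becomes a stabby lambda (`-> () { }`). A minimal, self-contained sketch of the equivalence (hypothetical variable names, not taken from the diff):

```ruby
# Before: hash rockets and proc (pre-Ruby-1.9 style)
old_options = { :alternatives => ["a", "b"], :resettable => false }
old_filter  = proc { |request| request.nil? }

# After: symbol-key hash literals and a stabby lambda
new_options = { alternatives: ["a", "b"], resettable: false }
new_filter  = ->(request) { request.nil? }

# The two styles are equivalent at runtime:
puts old_options == new_options                    # => true
puts old_filter.call(nil) == new_filter.call(nil)  # => true
```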
10071127 | <NME> README.md
<BEF> # [Split](https://libraries.io/rubygems/split)
[](http://badge.fury.io/rb/split)

[](https://codeclimate.com/github/splitrb/split)
[](https://codeclimate.com/github/splitrb/split/coverage)
[](https://github.com/RichardLitt/standard-readme)
[](https://www.codetriage.com/splitrb/split)
> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split
Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.
Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.
Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.
## Install
### Requirements
Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.
If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3).
Split uses Redis as a datastore.
Split only supports Redis 4.0 or greater.
If you're on OS X, Homebrew is the simplest way to install Redis:
```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```
You now have a Redis daemon running on port `6379`.
### Setup
```bash
gem install split
```
#### Rails
Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.
#### Sinatra
To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:
```ruby
require 'split'
class MySinatraApp < Sinatra::Base
enable :sessions
helpers Split::Helper
get '/' do
...
end
```
## Usage
To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.
Example: View
```erb
<% ab_test("login_button", "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
<%= image_tag(button_file, :alt => "Login!") %>
<% end %>
```
Example: Controller
```ruby
def register_new_user
  # See what level of free points maximizes users' decision to buy replacement points.
  @starter_points = ab_test("new_user_free_points", '100', '200', '300')
end
```
Example: Conversion tracking (in a controller!)
```ruby
def buy_new_points
# some business logic
finished("new_user_free_points")
end
```
Example: Conversion tracking (in a view)
```erb
Thanks for signing up, dude! <% finished("signup_page_redesign") %>
```
You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
## Statistical Validity
Split has two options for you to use to determine which alternative is the best.
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.
As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).
[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.
The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.
### Weighted alternatives
Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.
To do this you can pass a weight with each alternative in the following ways:
```ruby
ab_test('homepage design', {'Old' => 18}, {'New' => 2})
ab_test('homepage design', 'Old', {'New' => 1.0/9})
ab_test('homepage design', {'Old' => 9}, 'New')
```
This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.
### Overriding alternatives
For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the url.
If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:
http://myawesomesite.com?ab_test[button_color]=red
will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.
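If you do want overrides persisted and counted, a minimal sketch of enabling that option in an initializer:
```ruby
Split.configure do |config|
  config.store_override = true
end
```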
In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.
http://myawesomesite.com?SPLIT_DISABLE=true
When running your test suite with RSpec, you can force `ab_test` to always return a chosen alternative by stubbing it out with a helper like the one below:
```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
# Force a specific experiment alternative to always be returned:
# use_ab_test(signup_form: "single_page")
#
# Force alternatives for multiple experiments:
# use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
#
def use_ab_test(alternatives_by_experiment)
allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
block.call(variant) unless block.nil?
variant
end
end
end
# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
config.include SplitHelper
end
```
Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:
```ruby
it "registers using experimental signup" do
use_ab_test experiment_name: "alternative_name"
post "/signups"
...
end
```
### Reset after completion
When a user completes a test their session is reset so that they may start the test again in the future.
To stop this behaviour you can pass the following option to the `ab_finished` method:
```ruby
ab_finished(:experiment_name, reset: false)
```
The user will then always see the alternative they started with.
Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.
### Reset experiments manually
By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.
You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.
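A minimal sketch of turning that on:
```ruby
Split.configure do |config|
  config.reset_manually = true
end
```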
### Multiple experiments at once
By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.
To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = true
end
```
This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.
To address this, setting the `allow_multiple_experiments` config option to 'control' like so:
```ruby
Split.configure do |config|
config.allow_multiple_experiments = 'control'
end
```
For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment.
### Experiment Persistence
Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.
By default Split will store the tests for each user in the session.
You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.
#### Cookies
```ruby
Split.configure do |config|
config.persistence = :cookie
end
```
When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).
```ruby
Split.configure do |config|
config.persistence = :cookie
config.persistence_cookie_length = 2592000 # 30 days
end
```
Using Redis will allow ab_users to persist across sessions or machines.
```ruby
Split.configure do |config|
config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
# Equivalent
# config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```
Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
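For instance, a sketch combining both options (the namespace string here is illustrative):
```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(
    lookup_by: :current_user_id,
    namespace: "split_persistence"
  )
end
```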
#### Dual Adapter
The Dual Adapter allows you to use different persistence adapters for logged-in and logged-out users, for example Redis for logged-in users and cookies for logged-out users. A sketch, assuming a `current_user` method is available in the request context:
```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
  lookup_by: -> (context) { context.send(:current_user).try(:id) })
Split.configure do |config|
  config.persistence = Split::Persistence::DualAdapter.with_config(
    logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
    logged_in_adapter: redis_adapter,
    logged_out_adapter: cookie_adapter)
  config.persistence_cookie_length = 2592000 # 30 days
end
```
### Trial Event Hooks
You can define methods that will be called at the same time as experiment
alternative participation and goal completion.
For example:
```ruby
Split.configure do |config|
  config.on_trial  = :log_trial # run on every trial
  config.on_trial_choose   = :log_trial_choose # run on trials with new users only
  config.on_trial_complete = :log_trial_complete
end
```
Set these attributes to a method name available in the same context as the
`ab_test` method. These methods should accept one argument, a `Trial` instance.
``` ruby
def log_trial(trial)
logger.info "experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_choose(trial)
logger.info "[new user] experiment=%s alternative=%s user=%s" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
def log_trial_complete(trial)
logger.info "experiment=%s alternative=%s user=%s complete=true" %
[ trial.experiment.name, trial.alternative, current_user.id ]
end
```
in the controller:
``` ruby
helper_method :log_trial_choose
def log_trial_choose(trial)
  logger.info "[new user] experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```
### Experiment Hooks
You can also assign procs that run when an experiment itself is reset, deleted, or has a winner chosen, for example to keep data in your application in sync with Split:
``` ruby
Split.configure do |config|
  # after experiment reset or deleted
  config.on_experiment_reset  = -> (example) { # Do something on reset }
  config.on_experiment_delete = -> (experiment) { # Do something else on delete }
  # before experiment reset or deleted
  config.on_before_experiment_reset  = -> (example) { # Do something on reset }
  config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }
  # after experiment winner had been set
  config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }
end
```
## Web Interface
Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.
If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`
```ruby
require 'split/dashboard'
run Rack::URLMap.new \
"/" => Your::App.new,
"/split" => Split::Dashboard.new
```
However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:
```ruby
gem 'split', require: 'split/dashboard'
```
Then adding this to config/routes.rb
```ruby
mount Split::Dashboard, at: 'split'
```
You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file):
```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
# Protect against timing attacks:
# - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:
```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
request.env['warden'].authenticated? # are we authenticated?
request.env['warden'].authenticate! # authenticate if not already
# or even check any other condition such as request.env['warden'].user.is_admin?
end
```
More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)
### Screenshot

## Configuration
You can override the default configuration options of Split like so:
```ruby
Split.configure do |config|
  config.db_failover = true # handle redis errors gracefully
  config.db_failover_on_db_error = proc{|error| Rails.logger.error(error.message) }
  config.allow_multiple_experiments = true
  config.enabled = true
  config.persistence = Split::Persistence::SessionAdapter
config.redis = "redis://custom.redis.url:6380"
end
```
Split looks for the Redis host in the environment variable `REDIS_URL`, then defaults to `redis://localhost:6379` if it is not specified in the configure block.
On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to
determine which env variable key to use when retrieving the host config. This
defaults to `REDIS_URL`.
### Filtering
In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.
Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.
```ruby
Split.configure do |config|
  # bot config
  config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/
  # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? }
  config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
### Experiment configuration
You can also configure experiments in a hash, which can control your experiment's alternatives, weights, algorithm and whether the experiment resets once finished:
```ruby
Split.configure do |config|
  config.experiments = {
    "my_first_experiment" => {
      :alternatives => ["a", "b"],
      :resettable => false
    },
    "my_second_experiment" => {
      :algorithm => 'Split::Algorithms::Whiplash',
      :alternatives => [
        { :name => "a", :percent => 67 },
        { :name => "b", :percent => 33 }
      ]
    }
  }
end
```
You can also store your experiments in a YAML file:
```ruby
Split.configure do |config|
config.experiments = YAML.load_file "config/experiments.yml"
end
```
You can then define the YAML file like:
```yaml
my_first_experiment:
alternatives:
- a
- b
my_second_experiment:
alternatives:
- name: a
percent: 67
- name: b
percent: 33
resettable: false
```
This simplifies the calls from your code:
```ruby
ab_test(:my_first_experiment)
```
and:
```ruby
ab_finished(:my_first_experiment)
```
You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metadata: {
"a" => {"text" => "Have a fantastic day"},
"b" => {"text" => "Don't get hit by a bus"}
}
}
}
end
```
```yaml
my_first_experiment:
alternatives:
- a
- b
metadata:
a:
text: "Have a fantastic day"
b:
text: "Don't get hit by a bus"
```
This allows for some advanced experiment configuration using methods like:
```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```
or in views:
```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
<%= alternative %>
<small><%= meta['text'] %></small>
<% end %>
```
The keys used in meta data should be Strings.
#### Metrics
You might wish to track generic metrics, such as conversions, and use
those to complete multiple different experiments without adding more to
your code. You can use the configuration hash to do this, thanks to
the `:metric` option.
```ruby
Split.configure do |config|
config.experiments = {
my_first_experiment: {
alternatives: ["a", "b"],
metric: :my_metric
}
}
end
```
Your code may then track a completion using the metric instead of
the experiment name:
```ruby
ab_finished(:my_metric)
```
You can also create a new metric by instantiating and saving a new Metric object.
```ruby
Split::Metric.new(:my_metric)
Split::Metric.save
```
#### Goals
You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:
```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```
or you can define them in a configuration file:
```ruby
Split.configure do |config|
config.experiments = {
link_color: {
alternatives: ["red", "blue"],
goals: ["purchase", "refund"]
}
}
end
```
To complete a goal conversion, you do it like:
```ruby
ab_finished(link_color: "purchase")
```
Note that if you pass additional options, that should be a separate hash:
```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```
**NOTE:** This does not mean that a single experiment can complete more than one goal.
Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)
**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").
**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.
**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.
#### Combined Experiments
If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.
Configure like so:
```ruby
Split.configuration.experiments = {
:button_color_experiment => {
:alternatives => ["blue", "green"],
:combined_experiments => ["button_color_on_signup", "button_color_on_login"]
}
}
```
Starting the combined test starts all combined experiments
```ruby
ab_combined_test(:button_color_experiment)
```
Finish each combined test as normal
```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```
**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`
* In Sinatra include the CombinedExperimentsHelper
```
helpers Split::CombinedExperimentsHelper
```
### DB failover solution
Due to the fact that Redis has no automatic failover mechanism, it's
possible to switch on the `db_failover` config option, so that `ab_test`
and `ab_finished` will not crash in case of a db failure. `ab_test` always
delivers alternative A (the first one) in that case.
It's also possible to set a `db_failover_on_db_error` callback (proc)
for example to log these errors via Rails.logger.
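For example, a minimal sketch mirroring the configuration options shown earlier:
```ruby
Split.configure do |config|
  config.db_failover = true # serve the first alternative if Redis is down
  config.db_failover_on_db_error = proc{ |error| Rails.logger.error(error.message) }
end
```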
### Redis
You may want to change the Redis host and port Split connects to, or
set various other options at startup.
Split has a `redis` setter which can be given a string or a Redis
object. This means if you're already using Redis in your app, Split
can re-use the existing connection.
String: `Split.redis = 'redis://localhost:6379'`
Redis: `Split.redis = $redis`
For our rails app we have a `config/initializers/split.rb` file where
we load `config/split.yml` by hand and set the Redis information
appropriately.
Here's our `config/split.yml`:
```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```
And our initializer:
```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```
### Redis Caching (v4.0+)
In some high-volume usage scenarios, Redis load can be incurred by repeated
fetches for fairly static data. Enabling caching will reduce this load.
```ruby
Split.configuration.cache = true
```
This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`
## Namespaces
If you're running multiple, separate instances of Split you may want
to namespace the keyspaces so they do not overlap. This is not unlike
the approach taken by many memcached clients.
This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)
library. To configure Split to use `Redis::Namespace`, do the following:
1. Add `redis-namespace` to your Gemfile:
```ruby
gem 'redis-namespace'
```
2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an
initializer):
```ruby
redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
```
## Outside of a Web Session
Split provides the Helper module to facilitate running experiments inside web sessions.
Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to
conduct experiments that are not tied to a web session.
```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(:experiment => experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name
# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
trial.complete!
end
```
## Algorithms
By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.
It is possible to specify static weights to favor certain alternatives.
`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).
This algorithm will automatically weight the alternatives based on their relative performance,
choosing the better-performing ones more often as trials are completed.
`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal
participation across all alternatives. This algorithm will choose the alternative
with the fewest participants. In the event of multiple minimum participant alternatives
(i.e. starting a new "Block") the algorithm will choose a random alternative from
those minimum participant alternatives.
Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.
To change the algorithm globally for all experiments, use the following in your initializer:
```ruby
Split.configure do |config|
config.algorithm = Split::Algorithms::Whiplash
end
```
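To pick an algorithm for a single experiment instead, a sketch using the experiments hash described earlier (the experiment name is illustrative):
```ruby
Split.configure do |config|
  config.experiments = {
    my_experiment: {
      alternatives: ["a", "b"],
      algorithm: 'Split::Algorithms::BlockRandomization'
    }
  }
end
```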
## Extensions
- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.
## Screencast
Ryan Bates has produced an excellent 10 minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)
## Blogposts
* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)
## Backers
Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]
<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a>
## Sponsors
Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]
<a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a>
## Contribute
Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.
### Development
The source code is hosted at [GitHub](https://github.com/splitrb/split).
Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).
You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby).
### Tests
Run the tests like this:
```bash
# Start a Redis server in another tab.
redis-server
bundle
rake spec
```
### A Note on Patches and Pull Requests
* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a
future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history.
(If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.
### Code of Conduct
Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
## Copyright
[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
<MSG> Merge pull request #320 from grosser/grosser/symbols
modernize and symbols
<DFF> @@ -68,8 +68,8 @@ It can be used to render different templates, show different text or any other c
Example: View
```erb
-<% ab_test("login_button", "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
- <%= image_tag(button_file, :alt => "Login!") %>
+<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
+ <%= image_tag(button_file, alt: "Login!") %>
<% end %>
```
@@ -78,7 +78,7 @@ Example: Controller
```ruby
def register_new_user
# See what level of free points maximizes users' decision to buy replacement points.
- @starter_points = ab_test("new_user_free_points", '100', '200', '300')
+ @starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```
@@ -87,14 +87,14 @@ Example: Conversion tracking (in a controller!)
```ruby
def buy_new_points
# some business logic
- finished("new_user_free_points")
+ finished(:new_user_free_points)
end
```
Example: Conversion tracking (in a view)
```erb
-Thanks for signing up, dude! <% finished("signup_page_redesign") %>
+Thanks for signing up, dude! <% finished(:signup_page_redesign) %>
```
You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
@@ -121,11 +121,11 @@ Perhaps you only want to show an alternative to 10% of your visitors because it
To do this you can pass a weight with each alternative in the following ways:
```ruby
-ab_test('homepage design', {'Old' => 18}, {'New' => 2})
+ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
-ab_test('homepage design', 'Old', {'New' => 1.0/9})
+ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
-ab_test('homepage design', {'Old' => 9}, 'New')
+ab_test(:homepage_design, {'Old' => 9}, 'New')
```
This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.
@@ -161,7 +161,7 @@ When a user completes a test their session is reset so that they may start the t
To stop this behaviour you can pass the following option to the `finished` method:
```ruby
-finished('experiment_name', :reset => false)
+finished(:experiment_name, reset: false)
```
The user will then always see the alternative they started with.
@@ -202,9 +202,9 @@ Using Redis will allow ab_users to persist across sessions or machines.
```ruby
Split.configure do |config|
- config.persistence = Split::Persistence::RedisAdapter.with_config(:lookup_by => proc { |context| context.current_user_id })
+ config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
# Equivalent
- # config.persistence = Split::Persistence::RedisAdapter.with_config(:lookup_by => :current_user_id }
+ # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```
@@ -276,8 +276,8 @@ For example:
``` ruby
Split.configure do |config|
- config.on_experiment_reset = proc{ |experiment| # Do something on reset }
- config.on_experiment_delete = proc{ |experiment| # Do something else on delete }
+ config.on_experiment_reset = -> (example) { # Do something on reset }
+ config.on_experiment_delete = -> (experiment) { # Do something else on delete }
end
```
@@ -298,13 +298,13 @@ run Rack::URLMap.new \
However, if you are using Rails 3: You can mount this inside your app routes by first adding this to the Gemfile:
```ruby
-gem 'split', :require => 'split/dashboard'
+gem 'split', require: 'split/dashboard'
```
Then adding this to config/routes.rb
```ruby
-mount Split::Dashboard, :at => 'split'
+mount Split::Dashboard, at: 'split'
```
You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)
@@ -317,11 +317,11 @@ end
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:
```ruby
-match "/split" => Split::Dashboard, :anchor => false, :via => [:get, :post], :constraints => lambda { |request|
+match "/split" => Split::Dashboard, anchor: false, via: [:get, :post], constraints: -> (request) do
request.env['warden'].authenticated? # are we authenticated?
request.env['warden'].authenticate! # authenticate if not already
# or even check any other condition such as request.env['warden'].user.is_admin?
-}
+end
```
More information on this [here](http://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)
@@ -337,7 +337,7 @@ You can override the default configuration options of Split like so:
```ruby
Split.configure do |config|
config.db_failover = true # handle redis errors gracefully
- config.db_failover_on_db_error = proc{|error| Rails.logger.error(error.message) }
+ config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
config.allow_multiple_experiments = true
config.enabled = true
config.persistence = Split::Persistence::SessionAdapter
@@ -363,7 +363,7 @@ Split.configure do |config|
config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/
# or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? }
- config.ignore_filter = proc{ |request| CustomExcludeLogic.excludes?(request) }
+ config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
@@ -376,15 +376,15 @@ algorithm and if the experiment resets once finished:
```ruby
Split.configure do |config|
config.experiments = {
- "my_first_experiment" => {
- :alternatives => ["a", "b"],
- :resettable => false
+ my_first_experiment: {
+ alternatives: ["a", "b"],
+ resettable: false
},
- "my_second_experiment" => {
- :algorithm => 'Split::Algorithms::Whiplash',
- :alternatives => [
- { :name => "a", :percent => 67 },
- { :name => "b", :percent => 33 }
+ :my_second_experiment => {
+ algorithm: 'Split::Algorithms::Whiplash',
+ alternatives: [
+ { name: "a", percent: 67 },
+ { name: "b", percent: 33 }
]
}
}
@@ -418,13 +418,13 @@ my_second_experiment:
This simplifies the calls from your code:
```ruby
-ab_test("my_first_experiment")
+ab_test(:my_first_experiment)
```
and:
```ruby
-finished("my_first_experiment")
+finished(:my_first_experiment)
```
You can also add meta data for each experiment, very useful when you need more than an alternative name to change behaviour:
@@ -468,9 +468,9 @@ the `:metric` option.
```ruby
Split.configure do |config|
config.experiments = {
- "my_first_experiment" => {
- :alternatives => ["a", "b"],
- :metric => :my_metric,
+ my_first_experiment: {
+ alternatives: ["a", "b"],
+ metric: :my_metric,
}
}
end
@@ -496,7 +496,7 @@ You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:
```ruby
-ab_test({"link_color" => ["purchase", "refund"]}, "red", "blue")
+ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```
or you can you can define them in a configuration file:
@@ -504,9 +504,9 @@ or you can you can define them in a configuration file:
```ruby
Split.configure do |config|
config.experiments = {
- "link_color" => {
- :alternatives => ["red", "blue"],
- :goals => ["purchase", "refund"]
+ link_color: {
+ alternatives: ["red", "blue"],
+ goals: ["purchase", "refund"]
}
}
end
@@ -515,7 +515,7 @@ end
To complete a goal conversion, you do it like:
```ruby
-finished("link_color" => "purchase")
+finished(link_color: "purchase")
```
**NOTE:** This does not mean that a single experiment can have/complete progressive goals.
| 37 | Merge pull request #320 from grosser/grosser/symbols | 37 | .md | md | mit | splitrb/split |
10071128 | <NME> AUTHORS
<BEF> Ask Solem <[email protected]>
Rune Halvorsen <[email protected]>
Russel Sim <[email protected]>
Brian Rosner <[email protected]>
Hugo Lopes Tavares <[email protected]>
Sverre Johansen <[email protected]>
Bo Shi <[email protected]>
Carl Meyer <[email protected]>
Vinícius das Chagas Silva <[email protected]>
Vanderson Mota dos Santos <[email protected]>
Stefan Foulis <[email protected]>
Michael Richardson <[email protected]>
Halldór Rúnarsson <[email protected]>
Brent Tubbs <[email protected]>
David Cramer <[email protected]>
<MSG> changed my email address to my current work one.
<DFF> @@ -1,3 +1,3 @@
Ask Solem <[email protected]>
Rune Halvorsen <[email protected]>
-Russel Sim <[email protected]>
+Russel Sim <[email protected]>
| 1 | changed my email address to my current work one. | 1 | AUTHORS | bsd-3-clause | ask/chishop |
|
10071129 | <NME> redis_interface.rb
<BEF> # frozen_string_literal: true
module Split
# Simplifies the interface to Redis.
class RedisInterface
def initialize
self.redis = Split.redis
end
def persist_list(list_name, list_values)
if list_values.length > 0
redis.multi do |multi|
tmp_list = "#{list_name}_tmp"
multi.rpush(tmp_list, list_values)
multi.rename(tmp_list, list_name)
end
end
end
def add_to_set(set_name, value)
redis.sadd(set_name, value) unless redis.sismember(set_name, value)
end
private
attr_accessor :redis
end
end
<MSG> Remove extra SISMEMBER check when calling RedisInterface#add_to_set
<DFF> @@ -19,7 +19,7 @@ module Split
end
def add_to_set(set_name, value)
- redis.sadd(set_name, value) unless redis.sismember(set_name, value)
+ redis.sadd(set_name, value)
end
private
| 1 | Remove extra SISMEMBER check when calling RedisInterface#add_to_set | 1 | .rb | rb | mit | splitrb/split |
10071132 | <NME> README.md
<BEF>
# transducers.js
A small library for generalized transformation of data. This provides a bunch of transformation functions that can be applied to any data structure. It is a direct port of Clojure's [transducers](http://blog.cognitect.com/blog/2014/8/6/transducers-are-coming) in JavaScript. Read more in [this post](http://jlongster.com/Transducers.js--A-JavaScript-Library-for-Transformation-of-Data).
The algorithm behind this, explained in the above post, not only allows for it to work with any data structure (arrays, objects, iterators, immutable data structures, you name it) but it also provides better performance than other alternatives such as underscore or lodash. This is because there are no intermediate collections. See [this post](http://jlongster.com/Transducers.js-Round-2-with-Benchmarks) for benchmarks.
```
npm install transducers.js
```
For browsers, grab the file `dist/transducers.js`.
When writing programs, we frequently write methods that take in collections, do something with them, and return a result. The problem is that we frequently only write these functions to work a specific data structure, so if we ever change our data type or wanted to reuse that functionality, you can't. We need to decouple these kinds of concerns.
A transducer is a function that takes a reducing function and returns a new one. It can perform the necessary work and call the original reducing function to move on to the next "step". In this library, a transducer is a little more than that (it's actually an object that also supports init and finalizer methods) but generally you don't have to worry about these internal details. Read [my post](http://jlongster.com/Transducers.js--A-JavaScript-Library-for-Transformation-of-Data) if you want to learn more about the algorithm.
```js
var transform = compose(
map(x => x * 3),
filter(x => x % 2 === 0),
take(2)
);
seq([1, 2, 3, 4, 5], transform);
// -> [ 6, 12 ]
function* nums() {
var i = 1;
while(true) {
yield i++;
}
}
into([], transform, nums());
// -> [ 6, 12 ]
into([], transform, Immutable.List.of(1, 2, 3, 4, 5))
// -> [ 6, 12 ]
```
All of these work with arrays, objects, and any iterable data structure (like [immutable-js](https://github.com/facebook/immutable-js)) and you get all the high performance guarantees for free. The above code always only performs 2 transformations because of `take(2)`, no matter how large the array. This is done without laziness or any overhead of intermediate structures.
## Transformations
The following transformations are available, and there are more to come (like `partition`).
* `map(coll?, f, ctx?)` — call `f` on each item
* `filter(coll?, f, ctx?)` — only include the items where the result of calling `f` with the item is truthy
* `remove(coll?, f, ctx?)` — only include the items where the result of calling `f` with the item is falsy
* `keep(coll?)` — remove all items that are `null` or `undefined`
* `take(coll?, n)` — grab only the first `n` items
* `takeWhile(coll?, f, ctx?)` — grab only the first items where the result of calling `f` with the item is truthy
* `drop(coll?, n)` — drop the first `n` items and only include the rest
* `dropWhile(coll?, f, ctx?)` — drop the first items where the result of calling `f` with the item is truthy
* `dedupe(coll?)` — remove consecutive duplicates (equality compared with ===)
The above functions optionally take a collection to immediately perform the transformation on, and a context to bind `this` to when calling `f`. That means you can call them in four ways:
* Immediately perform a map: `map([1, 2, 3], x => x + 1)`
* Same as above but call the function with `this` as `ctx`: `map([1, 2, 3], function(x) { return x + 1; }, ctx)`
* Make a map transducer: `map(x => x + 1)`
* Same as above but with `this` as `ctx`: `map(function(x) { return x + 1; }, ctx)`
(I will be using the ES6 fat arrow syntax, but if that's not available just `function` instead)
The signature of running an immediate map is the same familiar one as seen in lodash and underscore, but now you can drop the collection to make a transducer and run multiple transformations with good performance:
```js
var transform = compose(
map(x => x + 1),
filter(x => x % 2 === 0),
take(2)
);
```
`compose` is a provided function that simply turns `compose(f, g)` into `x => f(g(x))`. You use it to build up transformations. The above transformation would always run the map and filter **only twice** because only two items are needed, and it short-circuits once it gets two items. Again, this is done without laziness, read more [here](http://jlongster.com/Transducers.js--A-JavaScript-Library-for-Transformation-of-Data).
There are also 2 transducers available for taking collections and "catting" them into the transformation stream:
* `cat` — take collections and forward each item individually, essentially flattening it
* `mapcat(f)` — same as `cat`, but first apply `f` to each collection
Just pass `cat` straight through like so: `compose(filter(x => x.length < 10), cat)`. That would take all arrays with a length less than 10 and flatten them out into a single array.
## Applying Transformations
Building data structure-agnostic transformations is cool, but how do you actually use them? `transducers.js` provides several integration points.
To use a transformation, we need to know how to iterate over the source data structure and how to build up a new one. The former is easy; we can work with arrays, objects, and anything that uses the ES6 iterator protocol (Maps, Sets, generators, etc). All of the functions below work with them.
For the latter, you need to specify what you want back. The following functions allow you to make a new data structure and possibly apply a transformation:
* `toArray(coll, xform?)` — Turn `coll` into an array, applying the transformation `xform` to each item if provided. The transform is optional in case you want to do something like turn an iterator into an array.
* `toObj(coll, xform?)` — Turn `coll` into an object if possible, applying the transformation `xform` if provided. When an object is iterated it produces two-element arrays `[key, value]`, and `toObj` will turn these back into an object.
* `toIter(coll, xform?)` — Make an iterator over `coll`, and apply the transformation `xform` to each value if specified. Note that `coll` can just be another iterator. **Transformations will be applied lazily**.
* `seq(coll, xform)` — A generalized method that will return the same data type that was passed in as `coll`, with `xform` applied. You will usually use this unless you know you want an array, object, or iterator. If `coll` is an iterator, another iterator will be returned and transformations will be applied lazily.
* `into(to, xform, from)` — Apply `xform` to each item in `from` and append it to `to`. This has the effect of "pouring" elements into `to`. You will commonly use this when converting one type of object to another.
* `transduce(coll, xform, reducer, init?)` — Like `reduce`, but apply `xform` to each value before passing to `reducer`. If `init` is not specify it will attempt to get it from `reducer`.
The possibilities are endless:
```js
// Map an object
seq({ foo: 1, bar: 2 }, map(kv => [kv[0], kv[1] + 1]));
// -> { foo: 2, bar: 3 }
// Make an array from an object
toArray({ foo: 1, bar: 2 });
// -> [ [ 'foo', 1 ], [ 'bar', 2 ] ]
// Make an array from an iterable
function* nums() {
var i = 1;
while(true) {
yield i++;
}
}
into([], take(3), nums());
// -> [ 1, 2, 3 ]
// Lazily transform an iterable
var iter = seq(nums(), compose(map(x => x * 2),
filter(x => x > 4)));
iter.next().value; // -> 6
iter.next().value; // -> 8
iter.next().value; // -> 10
```
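`toIter` wasn't shown above; since it is lazy, nothing is computed until the iterator is pulled (a small sketch):

```js
var it = toIter([1, 2, 3], map(x => x * 10));
it.next().value; // -> 10
it.next().value; // -> 20
```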
## Laziness
Transducers remove the requirement of being lazy to optimize for things like `take(10)`. However, it can still be useful to "bind" a collection to a set of transformations and pass it around, without actually evaluating the transformations.
As noted above, whenever you apply transformations to an iterator, they are applied lazily. It's easy to convert array transformations into a lazy operation: just use the utility function `iterator` to grab an iterator of the array instead:
```js
seq(iterator([1, 2, 3]),
compose(
map(x => x + 1),
filter(x => x % 2 === 0)))
// -> <Iterator>
```
Our transformations are completely blind to whether they are being applied lazily or eagerly.
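To see that blindness concretely, the same transformation can be run eagerly or lazily without changing it (a sketch with illustrative data):

```js
var xform = compose(map(x => x + 1),
                    filter(x => x % 2 === 0));

toArray([1, 2, 3], xform);                  // eager -> [ 2, 4 ]
toArray(seq(iterator([1, 2, 3]), xform));   // lazy until drained -> [ 2, 4 ]
```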
## Utility Functions
This library provides a few small utility functions:
* `iterator(coll)` — Get an iterator for `coll`, which can be any type like array, object, iterator, or custom data type
* `push(arr, value)` — Push `value` onto `arr` and return `arr`
* `merge(obj, value)` — Merge `value` into `obj`. `value` can be another object or a two-element array of `[key, value]`
* `range(n)` — Make an array of size `n` filled with the numbers `0` up to (but not including) `n`.
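A few illustrative calls (the results shown follow from the descriptions above):

```js
push([1, 2], 3);            // -> [ 1, 2, 3 ]
merge({ a: 1 }, ["b", 2]);  // -> { a: 1, b: 2 } (two-element [key, value] form)
range(3);                   // -> [ 0, 1, 2 ]
```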
## immutable-js
We've talked about how this can be applied to any data structure — let's see that in action. Here's how you could use this with [immutable-js](https://github.com/facebook/immutable-js).
```js
Immutable.fromJS(
seq(Immutable.Vector(1, 2, 3, 4, 5),
compose(
map(function(x) { return x + 10; }),
map(function(x) { return x * 2; }),
filter(function(x) { return x % 5 === 0; }),
filter(function(x) { return x % 2 === 0; })))
)
```
We can use our familiar `seq` function because `Immutable.Vector` implements the iterator protocol, so we can iterate over it. Because `seq` is working with an iterator, it returns a new iterator that will *lazily transform each value*. We can simply pass this iterator into `Immutable.Vector.from` to construct a new one, and we have a new transformed immutable vector with no intermediate collections except for one lazy transformer!
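A sketch of that last step written out directly (using the `Immutable.Vector.from` constructor named above; the data is illustrative):

```js
// seq returns a lazy iterator over the vector...
var doubled = seq(Immutable.Vector(1, 2, 3), map(x => x * 2));
// ...and Vector.from drains it into a new immutable vector
Immutable.Vector.from(doubled);
// -> Vector of [ 2, 4, 6 ]
```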
immutable-js's builtin transformations perform well because they minimize allocations, but since our approach has no intermediate structures or laziness machinery, it performs slightly better. The point is not to beat immutable-js, though; it is to show that both are high-performance, and that ours applies to any data structure.
## CSP Channels
This not only works with all the JavaScript data structures you can think of, but it even works for things like streams. Soon, channels from [js-csp](https://github.com/ubolonton/js-csp) will be able to take a transformation, and you will get all of this for free on channels:
```js
var ch = chan(1, compose(
cat,
map(x => x + 1),
dedupe(),
drop(3)
));
```
## The `transducer` protocol
While it's great that you can apply transducers to custom data structures, it's a bit annoying to always have to use constructor functions like `Immutable.fromJS`. One option is to define a new protocol complementary to `iterator`.
This conforms to the [official transducer spec](https://github.com/cognitect-labs/transducers-js/issues/20) so if you implement this, you can use it with all transducer libraries that conform to it.
To implement the transducer protocol, you add methods to the prototype of your data structure. A transformer is an object with three methods: `init`, `result`, and `step`. `init` returns a new empty collection, `result` can perform any finalization steps on the resulting collection, and `step` performs a single step of the reduction.
These methods are namespaced and in the future could be symbols. Here's what it looks like for `Immutable.List`:
```js
Immutable.List.prototype['@@transducer/init'] = function() {
return Immutable.List().asMutable();
};
Immutable.List.prototype['@@transducer/result'] = function(lst) {
return lst.asImmutable();
};
Immutable.List.prototype['@@transducer/step'] = function(lst, x) {
return lst.push(x);
};
```
If you implement the transducer protocol, now your data structure will work with *all* of the builtin functions. You can just use `seq` like normal (namespaced here as `t.seq`) and you get back an immutable list!
```js
t.seq(Immutable.List.of(1, 2, 3, 4, 5),
t.compose(
t.map(function(x) { return x + 10; }),
t.map(function(x) { return x * 2; }),
t.filter(function(x) { return x % 5 === 0; }),
t.filter(function(x) { return x % 2 === 0; })));
// -> List [ 30 ]
```
## Running Tests
```
npm install
gulp
mocha build/tests
```
[BSD LICENSE](https://github.com/jlongster/transducers.js/blob/master/LICENSE)
<MSG> Fix typo.
<DFF> @@ -74,7 +74,7 @@ var transform = compose(
);
```
-`compose` is a provided function that simply turns `compose(f, g)` into `x => f(g(x))`. You use it to build up transformations. The above transformation would always run the map and filter **only twice** becaue only two items are needed, and it short-circuits once it gets two items. Again, this is done without laziness, read more [here](http://jlongster.com/Transducers.js--A-JavaScript-Library-for-Transformation-of-Data).
+`compose` is a provided function that simply turns `compose(f, g)` into `x => f(g(x))`. You use it to build up transformations. The above transformation would always run the map and filter **only twice** because only two items are needed, and it short-circuits once it gets two items. Again, this is done without laziness, read more [here](http://jlongster.com/Transducers.js--A-JavaScript-Library-for-Transformation-of-Data).
There are also 2 transducers available for taking collections and "catting" them into the transformation stream:
| 1 | Fix typo. | 1 | .md | md | bsd-2-clause | jlongster/transducers.js |
10071133 | <NME> models.py
<BEF> import os
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
OS_NAMES = (
("aix", "AIX"),
("beos", "BeOS"),
("debian", "Debian Linux"),
("dos", "DOS"),
("freebsd", "FreeBSD"),
("hpux", "HP/UX"),
("mac", "Mac System x."),
("macos", "MacOS X"),
("mandrake", "Mandrake Linux"),
("netbsd", "NetBSD"),
("openbsd", "OpenBSD"),
("qnx", "QNX"),
("redhat", "RedHat Linux"),
("solaris", "SUN Solaris"),
("suse", "SuSE Linux"),
("yellowdog", "Yellow Dog Linux"),
)
ARCHITECTURES = (
("alpha", "Alpha"),
("hppa", "HPPA"),
("ix86", "Intel"),
("powerpc", "PowerPC"),
("sparc", "Sparc"),
("ultrasparc", "UltraSparc"),
)
UPLOAD_TO = getattr(settings,
"DJANGOPYPI_RELEASE_UPLOAD_TO", 'dist')
class Classifier(models.Model):
name = models.CharField(max_length=255, unique=True)
class Meta:
verbose_name = _(u"classifier")
verbose_name_plural = _(u"classifiers")
def __unicode__(self):
return self.name
class Project(models.Model):
name = models.CharField(max_length=255, unique=True)
license = models.TextField(blank=True)
metadata_version = models.CharField(max_length=64, default=1.0)
author = models.CharField(max_length=128, blank=True)
home_page = models.URLField(verify_exists=False, blank=True, null=True)
download_url = models.CharField(max_length=200, blank=True, null=True)
summary = models.TextField(blank=True)
description = models.TextField(blank=True)
author_email = models.CharField(max_length=255, blank=True)
classifiers = models.ManyToManyField(Classifier)
owner = models.ForeignKey(User, related_name="projects")
updated = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = _(u"project")
verbose_name_plural = _(u"projects")
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('djangopypi-show_links', (), {'dist_name': self.name})
@models.permalink
def get_pypi_absolute_url(self):
return ('djangopypi-pypi_show_links', (), {'dist_name': self.name})
def get_release(self, version):
"""Return the release object for version, or None"""
try:
return self.releases.get(version=version)
except Release.DoesNotExist:
return None
class Release(models.Model):
version = models.CharField(max_length=32)
distribution = models.FileField(upload_to=UPLOAD_TO)
md5_digest = models.CharField(max_length=255, blank=True)
platform = models.CharField(max_length=128, blank=True)
signature = models.CharField(max_length=128, blank=True)
filetype = models.CharField(max_length=255, blank=True)
pyversion = models.CharField(max_length=32, blank=True)
project = models.ForeignKey(Project, related_name="releases")
upload_time = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = _(u"release")
verbose_name_plural = _(u"releases")
unique_together = ("project", "version", "platform", "distribution", "pyversion")
def __unicode__(self):
return u"%s (%s)" % (self.release_name, self.platform)
@property
def type(self):
dist_file_types = {
'sdist':'Source',
'bdist_dumb':'"dumb" binary',
'bdist_rpm':'RPM',
verbose_name_plural = _(u"releases")
def __unicode__(self):
return u"%s %s (%s)" % (self.project.name, self.version, self.platform)
@property
def filename(self):
return os.path.basename(self.distribution.name)
@property
def path(self):
return self.distribution.name
@models.permalink
def get_absolute_url(self):
return ('djangopypi-show_version', (), {'dist_name': self.project, 'version': self.version})
def get_dl_url(self):
return "%s#md5=%s" % (self.distribution.url, self.md5_digest)
<MSG> Added property Release.release_name, returns project name and version in "foo-0.1" format.
<DFF> @@ -110,12 +110,16 @@ class Release(models.Model):
verbose_name_plural = _(u"releases")
def __unicode__(self):
- return u"%s %s (%s)" % (self.project.name, self.version, self.platform)
+ return u"%s (%s)" % (self.release_name, self.platform)
@property
def filename(self):
return os.path.basename(self.distribution.name)
+ @property
+ def release_name(self):
+ return u"%s-%s" % (self.project.name, self.version)
+
@property
def path(self):
return self.distribution.name
| 5 | Added property Release.release_name, returns project name and version in "foo-0.1" format. | 1 | .py | py | bsd-3-clause | ask/chishop |
10071134 | <NME> safemarkup.py
<BEF> from django import template
from django.conf import settings
from django.utils.encoding import smart_str, force_unicode
from django.utils.safestring import mark_safe
register = template.Library()
def saferst(value):
try:
from docutils.core import publish_parts
except ImportError:
return force_unicode(value)
docutils_settings = getattr(settings, "RESTRUCTUREDTEXT_FILTER_SETTINGS",
dict())
try:
parts = publish_parts(source=smart_str(value),
writer_name="html4css1",
settings_overrides=docutils_settings)
except:
return foce_unicode(value)
else:
return mark_safe(force_unicode(parts["fragment"]))
saferst.is_safe = True
register.filter(saferst)
<MSG> Fix typo foce_unicode -> force_unicode
<DFF> @@ -20,7 +20,7 @@ def saferst(value):
writer_name="html4css1",
settings_overrides=docutils_settings)
except:
- return foce_unicode(value)
+ return force_unicode(value)
else:
return mark_safe(force_unicode(parts["fragment"]))
saferst.is_safe = True
| 1 | Fix typo foce_unicode -> force_unicode | 1 | .py | py | bsd-3-clause | ask/chishop |
10071135 | <NME> split.gemspec
<BEF> # -*- encoding: utf-8 -*-
# frozen_string_literal: true
$:.push File.expand_path("../lib", __FILE__)
require "split/version"
Gem::Specification.new do |s|
s.name = "split"
s.version = Split::VERSION
s.platform = Gem::Platform::RUBY
s.authors = ["Andrew Nesbitt"]
s.licenses = ["MIT"]
s.email = ["[email protected]"]
s.homepage = "https://github.com/splitrb/split"
s.summary = "Rack based split testing framework"
s.metadata = {
"homepage_uri" => "https://github.com/splitrb/split",
"changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md",
"source_code_uri" => "https://github.com/splitrb/split",
"bug_tracker_uri" => "https://github.com/splitrb/split/issues",
"wiki_uri" => "https://github.com/splitrb/split/wiki",
"mailing_list_uri" => "https://groups.google.com/d/forum/split-ruby"
}
s.required_ruby_version = ">= 2.5.0"
s.required_rubygems_version = ">= 2.0.0"
s.add_development_dependency 'bundler', '~> 1.10'
s.add_development_dependency 'coveralls', '~> 0.8'
s.add_development_dependency 'rack-test', '~> 0.6'
s.add_development_dependency 'rake', '~> 10.4'
s.add_development_dependency 'rspec', '~> 3.4'
end
s.add_dependency "sinatra", ">= 1.2.6"
s.add_dependency "rubystats", ">= 0.3.0"
s.add_development_dependency "bundler", ">= 1.17"
s.add_development_dependency "simplecov", "~> 0.15"
s.add_development_dependency "rack-test", "~> 2.0"
s.add_development_dependency "rake", "~> 13"
s.add_development_dependency "rspec", "~> 3.7"
s.add_development_dependency "pry", "~> 0.10"
s.add_development_dependency "rails", ">= 5.0"
end
<MSG> Updated rake
Fixes #360
<DFF> @@ -28,6 +28,6 @@ Gem::Specification.new do |s|
s.add_development_dependency 'bundler', '~> 1.10'
s.add_development_dependency 'coveralls', '~> 0.8'
s.add_development_dependency 'rack-test', '~> 0.6'
- s.add_development_dependency 'rake', '~> 10.4'
+ s.add_development_dependency 'rake', '~> 11.1'
s.add_development_dependency 'rspec', '~> 3.4'
end
| 1 | Updated rake | 1 | .gemspec | gemspec | mit | splitrb/split |
10071138 | <NME> helper_spec.rb
<BEF> # frozen_string_literal: true
require "spec_helper"
# TODO change some of these tests to use Rack::Test
describe Split::Helper do
include Split::Helper
let(:experiment) {
Split::ExperimentCatalog.find_or_create("link_color", "blue", "red")
}
describe "ab_test" do
it "should not raise an error when passed strings for alternatives" do
expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error
end
it "should not raise an error when passed an array for alternatives" do
expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error
end
it "should raise the appropriate error when passed integers for alternatives" do
expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError)
end
it "should raise the appropriate error when passed symbols for alternatives" do
expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError)
end
it "should not raise error when passed an array for goals" do
expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error
end
it "should not raise error when passed just one goal" do
expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error
end
it "raises an appropriate error when processing combined expirements" do
Split.configuration.experiments = {
combined_exp_1: {
alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ],
metric: :my_metric,
combined_experiments: [:combined_exp_1_sub_1]
}
}
Split::ExperimentCatalog.find_or_create("combined_exp_1")
expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do
ab_test("link_color", "blue", "red")
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should increment the participation counter after assignment to a new user" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)
end
it "should not increment the counter for an experiment that the user is not participating in" do
ab_test("link_color", "blue", "red")
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
# User shouldn't participate in this second experiment
ab_test("button_size", "small", "big")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an not started experiment" do
expect(Split.configuration).to receive(:start_manually).and_return(true)
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should return the given alternative for an existing user" do
expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red")
end
it "should always return the winner if one is present" do
experiment.winner = "orange"
expect(ab_test("link_color", "blue", "red")).to eq("orange")
end
it "should allow the alternative to be forced by passing it in the params" do
# ?ab_test[link_color]=blue
@params = { "ab_test" => { "link_color" => "blue" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
@params = { "ab_test" => { "link_color" => "red" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1)
expect(alternative).to eq("red")
end
it "should not allow an arbitrary alternative" do
@params = { "ab_test" => { "link_color" => "pink" } }
alternative = ab_test("link_color", "blue")
expect(alternative).to eq("blue")
end
it "should not store the split when a param forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do
@params = { "SPLIT_DISABLE" => "true" }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
alternative = ab_test("link_color", "red", "blue")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1)
expect(alternative).to eq("red")
end
it "should not store the split when Split generically disabled" do
@params = { "SPLIT_DISABLE" => "true" }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
context "when store_override is set" do
before { Split.configuration.store_override = true }
it "should store the forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).to receive(:[]=).with("link_color", "blue")
ab_test("link_color", "blue", "red")
end
end
context "when on_trial_choose is set" do
before { Split.configuration.on_trial_choose = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_test("link_color", "blue", "red")
end
end
it "should allow passing a block" do
alt = ab_test("link_color", "blue", "red")
ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" }
expect(ret).to eq("shared/#{alt}")
end
it "should allow the share of visitors see an alternative to be specified" do
ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should allow alternative weighting interface as a single hash" do
ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)
end
end
end
end
it "should select the correct alternatives after experiment resets" do
experiment = Split::ExperimentCatalog.find(:test_0)
experiment.reset
mock_user[experiment.key] = "test-alt"
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "lets override existing choice" do
pending "this requires user store reset on first call not depending on whelther it is current trial"
@params = { "ab_test" => { "test_1" => "test-alt" } }
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control"
expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
end
end
end
it "should not over-write a finished key when an experiment is on a later version" do
experiment.increment_version
ab_user = { experiment.key => "blue", experiment.finished_key => true }
finished_session = ab_user.dup
ab_test("link_color", "blue", "red")
expect(ab_user).to eq(finished_session)
end
end
describe "metadata" do
context "is defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: { "one" => "Meta1", "two" => "Meta2" }
}
}
end
it "should be passed to helper block" do
@params = { "ab_test" => { "my_experiment" => "two" } }
expect(ab_test("my_experiment")).to eq "two"
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq("Meta2")
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment")).to eq "one"
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq("Meta1")
end
end
context "is not defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: nil
}
}
end
it "should be passed to helper block" do
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq({})
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq({})
end
end
end
describe "ab_finished" do
context "for an experiment that the user participates in" do
before(:each) do
@experiment_name = "link_color"
@alternatives = ["blue", "red"]
@experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
@alternative_name = ab_test(@experiment_name, *@alternatives)
@previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
end
it "should increment the counter for the completed alternative" do
ab_finished(@experiment_name)
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should set experiment's finished key if reset is false" do
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should not increment the counter if reset is false and the experiment has been already finished" do
2.times { ab_finished(@experiment_name, { reset: false }) }
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new(a, "button_size").completed_count }
end
it "should clear out the user's participation from their session" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should not clear out the users session if reset is false" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should reset the users session when experiment is not versioned" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should reset the users session when experiment is versioned" do
@experiment.increment_version
@alternative_name = ab_test(@experiment_name, *@alternatives)
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
context "when on_trial_complete is set" do
before { Split.configuration.on_trial_complete = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_finished(@experiment_name)
end
it "should not call the method without alternative" do
ab_user[@experiment.key] = nil
expect(self).not_to receive(:some_method)
ab_finished(@experiment_name)
end
end
end
context "for an experiment that the user is excluded from" do
before do
alternative = ab_test("link_color", "blue", "red")
expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1)
alternative = ab_test("button_size", "small", "big")
expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0)
end
it "should not increment the completed counter" do
# So, user should be participating in the link_color experiment and
# receive the control for button_size. As the user is not participating in
# the button size experiment, finishing it should not increase the
# completion count for that alternative.
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new("small", "button_size").completed_count }
end
end
context "for an experiment that the user does not participate in" do
before do
Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt")
end
it "should not raise an exception" do
expect { ab_finished(:not_started_experiment) }.not_to raise_exception
end
it "should not change the user state when reset is false" do
expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])
end
it "should not change the user state when reset is true" do
expect(self).not_to receive(:reset!)
ab_finished(:not_started_experiment)
end
it "should not increment the completed counter" do
ab_finished(:not_started_experiment)
expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0)
expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0)
end
end
end
context "finished with config" do
it "passes reset option" do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
}
}
alternative = ab_test(:my_experiment)
experiment = Split::ExperimentCatalog.find :my_experiment
ab_finished :my_experiment
expect(ab_user[experiment.key]).to eq(alternative)
expect(ab_user[experiment.finished_key]).to eq(true)
end
end
context "finished with metric name" do
before { Split.configuration.experiments = {} }
before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }
def should_finish_experiment(experiment_name, should_finish = true)
alts = Split.configuration.experiments[experiment_name][:alternatives]
experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)
alt_name = ab_user[experiment.key] = alts.first
alt = double("alternative")
expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)
expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)
if should_finish
expect(alt).to receive(:increment_completion).at_most(1).times
else
expect(alt).not_to receive(:increment_completion)
end
end
it "completes the test" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
metric: :my_metric
}
should_finish_experiment :my_experiment
ab_finished :my_metric
end
it "completes all relevant tests" do
Split.configuration.experiments = {
exp_1: {
alternatives: [ "1-1", "1-2" ],
metric: :my_metric
},
exp_2: {
alternatives: [ "2-1", "2-2" ],
metric: :another_metric
},
exp_3: {
alternatives: [ "3-1", "3-2" ],
metric: :my_metric
},
}
should_finish_experiment :exp_1
should_finish_experiment :exp_2, false
should_finish_experiment :exp_3
ab_finished :my_metric
end
it "passes reset option" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
resettable: false,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
it "passes through options" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric, reset: false
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
end
describe "conversions" do
it "should return a conversion rate for an alternative" do
alternative_name = ab_test("link_color", "blue", "red")
previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
expect(previous_convertion_rate).to eq(0.0)
ab_finished("link_color")
new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
expect(new_convertion_rate).to eq(1.0)
end
end
describe "active experiments" do
it "should show an active test" do
alternative = ab_test("def", "4", "5", "6")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show a finished test" do
alternative = ab_test("def", "4", "5", "6")
ab_finished("def", { reset: false })
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show an active test when an experiment is on a later version" do
experiment.reset
expect(experiment.version).to eq(1)
ab_test("link_color", "blue", "red")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "link_color"
end
it "should show versioned tests properly" do
10.times { experiment.reset }
alternative = ab_test(experiment.name, "blue", "red")
ab_finished(experiment.name, reset: false)
expect(experiment.version).to eq(10)
expect(active_experiments.count).to eq 1
expect(active_experiments).to eq({ "link_color" => alternative })
end
it "should show multiple tests" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
alternative = ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 2
expect(active_experiments["def"]).to eq alternative
expect(active_experiments["ghi"]).to eq another_alternative
end
it "should not show tests with winners" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6")
e.winner = "4"
ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "ghi"
expect(active_experiments.first[1]).to eq another_alternative
end
end
describe "when user is a robot" do
before(:each) do
@request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)")
end
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not create a experiment" do
ab_test("link_color", "blue", "red")
expect(Split::Experiment.new("link_color")).to be_a_new_record
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when providing custom ignore logic" do
context "using a proc to configure custom logic" do
before(:each) do
Split.configure do |c|
c.ignore_filter = proc { |request| true } # ignore everything
end
end
it "ignores the ab_test" do
ab_test("link_color", "blue", "red")
red_count = Split::Alternative.new("red", "link_color").participant_count
blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((red_count + blue_count)).to be(0)
end
end
end
shared_examples_for "a disabled test" do
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when ip address is ignored" do
context "individually" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.130")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it_behaves_like "a disabled test"
end
context "for a range" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.129")
Split.configure do |c|
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "using both a range and a specific value" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.128")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "when ignored other address" do
before do
@request = OpenStruct.new(ip: "1.1.1.1")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it "works as usual" do
alternative_name = ab_test("link_color", "red", "blue")
expect {
ab_finished("link_color")
}.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1)
end
end
end
describe "when user is previewing" do
before(:each) do
@request = OpenStruct.new(headers: { "x-purpose" => "preview" })
end
it_behaves_like "a disabled test"
end
describe "versioned experiments" do
it "should use version zero if no version is present" do
alternative_name = ab_test("link_color", "blue", "red")
expect(experiment.version).to eq(0)
expect(ab_user["link_color"]).to eq(alternative_name)
end
it "should save the version of the experiment to the session" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
end
it "should load the experiment even if the version is not 0" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
return_alternative_name = ab_test("link_color", "blue", "red")
expect(return_alternative_name).to eq(alternative_name)
end
it "should reset the session of a user on an older version of the experiment" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
expect(new_alternative.participant_count).to eq(1)
end
it "should cleanup old versions of experiments from the session" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
end
it "should only count completion of users on the current version" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
Split::Alternative.new(alternative_name, "link_color")
experiment.reset
expect(experiment.version).to eq(1)
ab_finished("link_color")
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.completed_count).to eq(0)
end
end
context "when redis is not available" do
before(:each) do
expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)
end
context "and db_failover config option is turned off" do
before(:each) do
Split.configure do |config|
config.db_failover = false
end
end
describe "ab_test" do
it "should raise an exception" do
expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "finished" do
it "should raise an exception" do
expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "disable split testing" do
before(:each) do
Split.configure do |config|
config.enabled = false
end
end
it "should not attempt to connect to redis" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should return control variable" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect { ab_finished("link_color") }.not_to raise_error
end
end
end
context "and db_failover config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover = true
end
end
describe "ab_test" do
it "should not raise an exception" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_test("link_color", "blue", "red")
end
it "should always use first alternative" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/blue")
end
context "and db_failover_allow_parameter_override config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover_allow_parameter_override = true
end
end
context "and given an override parameter" do
it "should use given override instead of the first alternative" do
@params = { "ab_test" => { "link_color" => "red" } }
expect(ab_test("link_color", "blue", "red")).to eq("red")
expect(ab_test("link_color", "blue", "red", "green")).to eq("red")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/red")
end
end
end
context "and preloaded config given" do
before do
Split.configuration.experiments[:link_color] = {
alternatives: [ "blue", "red" ],
}
end
it "uses first alternative" do
expect(ab_test(:link_color)).to eq("blue")
end
end
end
describe "finished" do
it "should not raise an exception" do
expect { ab_finished("link_color") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_finished("link_color")
end
end
end
end
context "with preloaded config" do
before { Split.configuration.experiments = {} }
it "pulls options from config file" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
ab_test :my_experiment
expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
end
it "can be called multiple times" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
5.times { ab_test :my_experiment }
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(experiment.goals).to eq([ "goal1", "goal2" ])
expect(experiment.participant_count).to eq(1)
end
it "accepts multiple goals" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: [ "goal1", "goal2", "goal3" ]
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
end
it "allow specifying goals to be optional" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ]
}
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([])
end
it "accepts multiple alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "second_opt", "third_opt" ],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
end
it "accepts probability on alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 67 },
{ name: "second_opt", percent: 10 },
{ name: "third_opt", percent: 23 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
end
it "accepts probability on some alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 34 },
"second_opt",
{ name: "third_opt", percent: 23 },
"fourth_opt",
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "allows name param without probability" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt" },
"second_opt",
{ name: "third_opt", percent: 64 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "fails gracefully if config is missing experiment" do
Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
end
it "fails gracefully if config is missing" do
expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "fails gracefully if config is missing alternatives" do
Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
expect { ab_test :my_experiment }.to raise_error(NoMethodError)
end
end
it "should handle multiple experiments correctly" do
experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
ab_test("link_color", "blue", "red")
ab_test("link_color2", "blue", "red")
ab_finished("link_color2")
experiment2.alternatives.each do |alt|
expect(alt.unfinished_count).to eq(0)
end
end
context "with goals" do
before do
@experiment = { "link_color" => ["purchase", "refund"] }
@alternatives = ["blue", "red"]
@experiment_name, @goals = normalize_metric(@experiment)
@goal1 = @goals[0]
@goal2 = @goals[1]
end
it "should normalize experiment" do
expect(@experiment_name).to eq("link_color")
expect(@goals).to eq(["purchase", "refund"])
end
describe "ab_test" do
it "should allow experiment goals interface as a single hash" do
ab_test(@experiment, *@alternatives)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.goals).to eq(["purchase", "refund"])
end
end
describe "ab_finished" do
before do
@alternative_name = ab_test(@experiment, *@alternatives)
end
it "should increment the counter for the specified-goal completed alternative" do
expect { ab_finished({ "link_color" => ["purchase"] }) }
.to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
.and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
end
end
end
end
<MSG> first pass at versioning experiments for better resets
<DFF> @@ -185,4 +185,63 @@ describe Split::Helper do
end
end
end
+
+ describe 'versioned experiments' do
+ it "should use version zero if no version is present" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ experiment.version.should eql(0)
+ session[:split].should eql({'link_color' => alternative_name})
+ end
+
+ it "should save the version of the experiment to the session" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ experiment.reset
+ experiment.version.should eql(1)
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split].should eql({'link_color:1' => alternative_name})
+ end
+
+ it "should load the experiment even if the version is not 0" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ experiment.reset
+ experiment.version.should eql(1)
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split].should eql({'link_color:1' => alternative_name})
+ return_alternative_name = ab_test('link_color', 'blue', 'red')
+ return_alternative_name.should eql(alternative_name)
+ end
+
+ it "should reset the session of a user on an older version of the experiment" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split].should eql({'link_color' => alternative_name})
+ alternative = Split::Alternative.find(alternative_name, 'link_color')
+ alternative.participant_count.should eql(1)
+
+ experiment.reset
+ experiment.version.should eql(1)
+ alternative = Split::Alternative.find(alternative_name, 'link_color')
+ alternative.participant_count.should eql(0)
+
+ new_alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split]['link_color:1'].should eql(new_alternative_name)
+ new_alternative = Split::Alternative.find(new_alternative_name, 'link_color')
+ new_alternative.participant_count.should eql(1)
+ end
+
+ it "should only count completion of users on the current version" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split].should eql({'link_color' => alternative_name})
+ alternative = Split::Alternative.find(alternative_name, 'link_color')
+
+ experiment.reset
+ experiment.version.should eql(1)
+
+ finished('link_color')
+ alternative = Split::Alternative.find(alternative_name, 'link_color')
+ alternative.completed_count.should eql(0)
+ end
+ end
end
\ No newline at end of file
| 59 | first pass at versioning experiments for better resets | 0 | .rb | rb | mit | splitrb/split |
10071139 | <NME> helper_spec.rb
<BEF> # frozen_string_literal: true
require "spec_helper"
# TODO change some of these tests to use Rack::Test
describe Split::Helper do
include Split::Helper
let(:experiment) {
Split::ExperimentCatalog.find_or_create("link_color", "blue", "red")
}
describe "ab_test" do
it "should not raise an error when passed strings for alternatives" do
expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error
end
it "should not raise an error when passed an array for alternatives" do
expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error
end
it "should raise the appropriate error when passed integers for alternatives" do
expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError)
end
it "should raise the appropriate error when passed symbols for alternatives" do
expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError)
end
it "should not raise error when passed an array for goals" do
expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error
end
it "should not raise error when passed just one goal" do
expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error
end
it "raises an appropriate error when processing combined expirements" do
Split.configuration.experiments = {
combined_exp_1: {
alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ],
metric: :my_metric,
combined_experiments: [:combined_exp_1_sub_1]
}
}
Split::ExperimentCatalog.find_or_create("combined_exp_1")
expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do
ab_test("link_color", "blue", "red")
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should increment the participation counter after assignment to a new user" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)
end
it "should not increment the counter for an experiment that the user is not participating in" do
ab_test("link_color", "blue", "red")
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
# User shouldn't participate in this second experiment
ab_test("button_size", "small", "big")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an not started experiment" do
expect(Split.configuration).to receive(:start_manually).and_return(true)
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should return the given alternative for an existing user" do
expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red")
end
it "should always return the winner if one is present" do
experiment.winner = "orange"
expect(ab_test("link_color", "blue", "red")).to eq("orange")
end
it "should allow the alternative to be forced by passing it in the params" do
# ?ab_test[link_color]=blue
@params = { "ab_test" => { "link_color" => "blue" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
@params = { "ab_test" => { "link_color" => "red" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1)
expect(alternative).to eq("red")
end
it "should not allow an arbitrary alternative" do
@params = { "ab_test" => { "link_color" => "pink" } }
alternative = ab_test("link_color", "blue")
expect(alternative).to eq("blue")
end
it "should not store the split when a param forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do
@params = { "SPLIT_DISABLE" => "true" }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
alternative = ab_test("link_color", "red", "blue")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1)
expect(alternative).to eq("red")
end
it "should not store the split when Split generically disabled" do
@params = { "SPLIT_DISABLE" => "true" }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
context "when store_override is set" do
before { Split.configuration.store_override = true }
it "should store the forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).to receive(:[]=).with("link_color", "blue")
ab_test("link_color", "blue", "red")
end
end
context "when on_trial_choose is set" do
before { Split.configuration.on_trial_choose = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_test("link_color", "blue", "red")
end
end
it "should allow passing a block" do
alt = ab_test("link_color", "blue", "red")
ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" }
expect(ret).to eq("shared/#{alt}")
end
it "should allow the share of visitors see an alternative to be specified" do
ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should allow alternative weighting interface as a single hash" do
ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)
end
end
end
end
it "should select the correct alternatives after experiment resets" do
experiment = Split::ExperimentCatalog.find(:test_0)
experiment.reset
mock_user[experiment.key] = "test-alt"
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "lets override existing choice" do
pending "this requires user store reset on first call not depending on whelther it is current trial"
@params = { "ab_test" => { "test_1" => "test-alt" } }
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control"
expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
end
end
end
it "should not over-write a finished key when an experiment is on a later version" do
experiment.increment_version
ab_user = { experiment.key => "blue", experiment.finished_key => true }
finished_session = ab_user.dup
ab_test("link_color", "blue", "red")
expect(ab_user).to eq(finished_session)
end
end
describe "metadata" do
context "is defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: { "one" => "Meta1", "two" => "Meta2" }
}
}
end
it "should be passed to helper block" do
@params = { "ab_test" => { "my_experiment" => "two" } }
expect(ab_test("my_experiment")).to eq "two"
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq("Meta2")
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment")).to eq "one"
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq("Meta1")
end
end
context "is not defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: nil
}
}
end
it "should be passed to helper block" do
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq({})
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq({})
end
end
end
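  # A hedged usage sketch of the metadata block form specified above; the
  # assignment target is illustrative, only `ab_test` comes from the helper:
  #
  #   label = ab_test("my_experiment") { |alternative, meta| meta }
  #   # => "Meta2" when the user is bucketed into "two"; {} when metadata
  #   #    is not defined for the experiment.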
describe "ab_finished" do
context "for an experiment that the user participates in" do
before(:each) do
@experiment_name = "link_color"
@alternatives = ["blue", "red"]
@experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
@alternative_name = ab_test(@experiment_name, *@alternatives)
@previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
end
it "should increment the counter for the completed alternative" do
ab_finished(@experiment_name)
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should set experiment's finished key if reset is false" do
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should not increment the counter if reset is false and the experiment has been already finished" do
2.times { ab_finished(@experiment_name, { reset: false }) }
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new(a, "button_size").completed_count }
end
it "should clear out the user's participation from their session" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should not clear out the users session if reset is false" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should reset the users session when experiment is not versioned" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should reset the users session when experiment is versioned" do
@experiment.increment_version
@alternative_name = ab_test(@experiment_name, *@alternatives)
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
context "when on_trial_complete is set" do
before { Split.configuration.on_trial_complete = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_finished(@experiment_name)
end
it "should not call the method without alternative" do
ab_user[@experiment.key] = nil
expect(self).not_to receive(:some_method)
ab_finished(@experiment_name)
end
end
end
context "for an experiment that the user is excluded from" do
before do
alternative = ab_test("link_color", "blue", "red")
expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1)
alternative = ab_test("button_size", "small", "big")
expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0)
end
it "should not increment the completed counter" do
        # The user should be participating in the link_color experiment and
        # receiving the control for button_size. Since the user is not
        # participating in the button_size experiment, finishing it should not
        # increase the completion count for that alternative.
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new("small", "button_size").completed_count }
end
end
context "for an experiment that the user does not participate in" do
before do
Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt")
end
it "should not raise an exception" do
expect { ab_finished(:not_started_experiment) }.not_to raise_exception
end
it "should not change the user state when reset is false" do
expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])
end
it "should not change the user state when reset is true" do
expect(self).not_to receive(:reset!)
ab_finished(:not_started_experiment)
end
it "should not increment the completed counter" do
ab_finished(:not_started_experiment)
expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0)
expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0)
end
end
end
context "finished with config" do
it "passes reset option" do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
}
}
alternative = ab_test(:my_experiment)
experiment = Split::ExperimentCatalog.find :my_experiment
ab_finished :my_experiment
expect(ab_user[experiment.key]).to eq(alternative)
expect(ab_user[experiment.finished_key]).to eq(true)
end
end
context "finished with metric name" do
before { Split.configuration.experiments = {} }
before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }
def should_finish_experiment(experiment_name, should_finish = true)
alts = Split.configuration.experiments[experiment_name][:alternatives]
experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)
alt_name = ab_user[experiment.key] = alts.first
alt = double("alternative")
expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)
expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)
if should_finish
expect(alt).to receive(:increment_completion).at_most(1).times
else
expect(alt).not_to receive(:increment_completion)
end
end
it "completes the test" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
metric: :my_metric
}
should_finish_experiment :my_experiment
ab_finished :my_metric
end
it "completes all relevant tests" do
Split.configuration.experiments = {
exp_1: {
alternatives: [ "1-1", "1-2" ],
metric: :my_metric
},
exp_2: {
alternatives: [ "2-1", "2-2" ],
metric: :another_metric
},
exp_3: {
alternatives: [ "3-1", "3-2" ],
metric: :my_metric
},
}
should_finish_experiment :exp_1
should_finish_experiment :exp_2, false
should_finish_experiment :exp_3
ab_finished :my_metric
end
it "passes reset option" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
resettable: false,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
it "passes through options" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric, reset: false
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
end
describe "conversions" do
it "should return a conversion rate for an alternative" do
alternative_name = ab_test("link_color", "blue", "red")
      previous_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(previous_conversion_rate).to eq(0.0)
      ab_finished("link_color")
      new_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(new_conversion_rate).to eq(1.0)
end
end
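  # A minimal sketch of the conversion-rate arithmetic the expectations above
  # rely on (assumed: completed_count divided by participant_count):
  #
  #   alt = Split::Alternative.new(alternative_name, "link_color")
  #   rate = alt.participant_count.zero? ? 0.0 : alt.completed_count.to_f / alt.participant_count
  #   # 1 participant, 0 completions => 0.0; after ab_finished => 1/1 = 1.0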
describe "active experiments" do
it "should show an active test" do
alternative = ab_test("def", "4", "5", "6")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show a finished test" do
alternative = ab_test("def", "4", "5", "6")
ab_finished("def", { reset: false })
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show an active test when an experiment is on a later version" do
experiment.reset
expect(experiment.version).to eq(1)
ab_test("link_color", "blue", "red")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "link_color"
end
it "should show versioned tests properly" do
10.times { experiment.reset }
alternative = ab_test(experiment.name, "blue", "red")
ab_finished(experiment.name, reset: false)
expect(experiment.version).to eq(10)
expect(active_experiments.count).to eq 1
expect(active_experiments).to eq({ "link_color" => alternative })
end
it "should show multiple tests" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
alternative = ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 2
expect(active_experiments["def"]).to eq alternative
expect(active_experiments["ghi"]).to eq another_alternative
end
it "should not show tests with winners" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6")
e.winner = "4"
ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "ghi"
expect(active_experiments.first[1]).to eq another_alternative
end
end
describe "when user is a robot" do
before(:each) do
@request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)")
end
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not create a experiment" do
ab_test("link_color", "blue", "red")
expect(Split::Experiment.new("link_color")).to be_a_new_record
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
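  # Bot detection keys off the stubbed `request.user_agent` above. A hedged
  # configuration sketch: Split exposes a `robot_regex` setting, and the
  # pattern below is illustrative only:
  #
  #   Split.configure do |config|
  #     config.robot_regex = /\b(?:googlebot|bingbot|crawler|spider)\b/i
  #   end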
describe "when providing custom ignore logic" do
context "using a proc to configure custom logic" do
before(:each) do
Split.configure do |c|
c.ignore_filter = proc { |request| true } # ignore everything
end
end
it "ignores the ab_test" do
ab_test("link_color", "blue", "red")
red_count = Split::Alternative.new("red", "link_color").participant_count
blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((red_count + blue_count)).to be(0)
end
end
end
shared_examples_for "a disabled test" do
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when ip address is ignored" do
context "individually" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.130")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it_behaves_like "a disabled test"
end
context "for a range" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.129")
Split.configure do |c|
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "using both a range and a specific value" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.128")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "when ignored other address" do
before do
@request = OpenStruct.new(ip: "1.1.1.1")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it "works as usual" do
alternative_name = ab_test("link_color", "red", "blue")
expect {
ab_finished("link_color")
}.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1)
end
end
end
describe "when user is previewing" do
before(:each) do
@request = OpenStruct.new(headers: { "x-purpose" => "preview" })
end
it_behaves_like "a disabled test"
end
describe "versioned experiments" do
it "should use version zero if no version is present" do
alternative_name = ab_test("link_color", "blue", "red")
expect(experiment.version).to eq(0)
expect(ab_user["link_color"]).to eq(alternative_name)
end
it "should save the version of the experiment to the session" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
end
it "should load the experiment even if the version is not 0" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
return_alternative_name = ab_test("link_color", "blue", "red")
expect(return_alternative_name).to eq(alternative_name)
end
it "should reset the session of a user on an older version of the experiment" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
expect(new_alternative.participant_count).to eq(1)
end
it "should cleanup old versions of experiments from the session" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
end
it "should only count completion of users on the current version" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
Split::Alternative.new(alternative_name, "link_color")
experiment.reset
expect(experiment.version).to eq(1)
ab_finished("link_color")
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.completed_count).to eq(0)
end
end
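  # The versioning behaviour above hinges on the session key scheme, which
  # from these expectations appears to be (a hedged reconstruction):
  #
  #   experiment.version # => 0, so the ab_user key is "link_color"
  #   experiment.reset   # version => 1, so the key becomes "link_color:1"
  #   # i.e. roughly: version.zero? ? name : "#{name}:#{version}"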
context "when redis is not available" do
before(:each) do
expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)
end
context "and db_failover config option is turned off" do
before(:each) do
Split.configure do |config|
config.db_failover = false
end
end
describe "ab_test" do
it "should raise an exception" do
expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "finished" do
it "should raise an exception" do
expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "disable split testing" do
before(:each) do
Split.configure do |config|
config.enabled = false
end
end
it "should not attempt to connect to redis" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should return control variable" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect { ab_finished("link_color") }.not_to raise_error
end
end
end
context "and db_failover config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover = true
end
end
describe "ab_test" do
it "should not raise an exception" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_test("link_color", "blue", "red")
end
it "should always use first alternative" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/blue")
end
context "and db_failover_allow_parameter_override config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover_allow_parameter_override = true
end
end
context "and given an override parameter" do
it "should use given override instead of the first alternative" do
@params = { "ab_test" => { "link_color" => "red" } }
expect(ab_test("link_color", "blue", "red")).to eq("red")
expect(ab_test("link_color", "blue", "red", "green")).to eq("red")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/red")
end
end
end
context "and preloaded config given" do
before do
Split.configuration.experiments[:link_color] = {
alternatives: [ "blue", "red" ],
}
end
it "uses first alternative" do
expect(ab_test(:link_color)).to eq("blue")
end
end
end
describe "finished" do
it "should not raise an exception" do
expect { ab_finished("link_color") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_finished("link_color")
end
end
end
end
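  # A hedged configuration sketch covering the failover behaviour specified
  # above (`ErrorTracker.notify` is a hypothetical reporting call):
  #
  #   Split.configure do |config|
  #     config.db_failover = true # fall back to the control on Redis errors
  #     config.db_failover_on_db_error = proc { |error| ErrorTracker.notify(error) }
  #     config.db_failover_allow_parameter_override = true
  #   end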
context "with preloaded config" do
before { Split.configuration.experiments = {} }
it "pulls options from config file" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
ab_test :my_experiment
expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
end
it "can be called multiple times" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
5.times { ab_test :my_experiment }
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(experiment.goals).to eq([ "goal1", "goal2" ])
expect(experiment.participant_count).to eq(1)
end
it "accepts multiple goals" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: [ "goal1", "goal2", "goal3" ]
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
end
it "allow specifying goals to be optional" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ]
}
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([])
end
it "accepts multiple alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "second_opt", "third_opt" ],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
end
it "accepts probability on alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 67 },
{ name: "second_opt", percent: 10 },
{ name: "third_opt", percent: 23 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
end
it "accepts probability on some alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 34 },
"second_opt",
{ name: "third_opt", percent: 23 },
"fourth_opt",
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
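    # Worked arithmetic for the expectation above: the explicit weights take
    # 34% + 23% = 57%, leaving 43% to be shared evenly by the two unweighted
    # alternatives, i.e. 21.5% (0.215) each, which is where the 0.215 values
    # come from.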
it "allows name param without probability" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt" },
"second_opt",
{ name: "third_opt", percent: 64 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "fails gracefully if config is missing experiment" do
Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
end
it "fails gracefully if config is missing" do
expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "fails gracefully if config is missing alternatives" do
Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
expect { ab_test :my_experiment }.to raise_error(NoMethodError)
end
end
it "should handle multiple experiments correctly" do
experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
ab_test("link_color", "blue", "red")
ab_test("link_color2", "blue", "red")
ab_finished("link_color2")
experiment2.alternatives.each do |alt|
expect(alt.unfinished_count).to eq(0)
end
end
context "with goals" do
before do
@experiment = { "link_color" => ["purchase", "refund"] }
@alternatives = ["blue", "red"]
@experiment_name, @goals = normalize_metric(@experiment)
@goal1 = @goals[0]
@goal2 = @goals[1]
end
it "should normalize experiment" do
expect(@experiment_name).to eq("link_color")
expect(@goals).to eq(["purchase", "refund"])
end
describe "ab_test" do
it "should allow experiment goals interface as a single hash" do
ab_test(@experiment, *@alternatives)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.goals).to eq(["purchase", "refund"])
end
end
describe "ab_finished" do
before do
@alternative_name = ab_test(@experiment, *@alternatives)
end
it "should increment the counter for the specified-goal completed alternative" do
expect { ab_finished({ "link_color" => ["purchase"] }) }
.to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
.and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
end
end
end
end
<MSG> first pass at versioning experiments for better resets
<DFF> @@ -185,4 +185,63 @@ describe Split::Helper do
end
end
end
+
+ describe 'versioned experiments' do
+ it "should use version zero if no version is present" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ experiment.version.should eql(0)
+ session[:split].should eql({'link_color' => alternative_name})
+ end
+
+ it "should save the version of the experiment to the session" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ experiment.reset
+ experiment.version.should eql(1)
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split].should eql({'link_color:1' => alternative_name})
+ end
+
+ it "should load the experiment even if the version is not 0" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ experiment.reset
+ experiment.version.should eql(1)
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split].should eql({'link_color:1' => alternative_name})
+ return_alternative_name = ab_test('link_color', 'blue', 'red')
+ return_alternative_name.should eql(alternative_name)
+ end
+
+ it "should reset the session of a user on an older version of the experiment" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split].should eql({'link_color' => alternative_name})
+ alternative = Split::Alternative.find(alternative_name, 'link_color')
+ alternative.participant_count.should eql(1)
+
+ experiment.reset
+ experiment.version.should eql(1)
+ alternative = Split::Alternative.find(alternative_name, 'link_color')
+ alternative.participant_count.should eql(0)
+
+ new_alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split]['link_color:1'].should eql(new_alternative_name)
+ new_alternative = Split::Alternative.find(new_alternative_name, 'link_color')
+ new_alternative.participant_count.should eql(1)
+ end
+
+ it "should only count completion of users on the current version" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split].should eql({'link_color' => alternative_name})
+ alternative = Split::Alternative.find(alternative_name, 'link_color')
+
+ experiment.reset
+ experiment.version.should eql(1)
+
+ finished('link_color')
+ alternative = Split::Alternative.find(alternative_name, 'link_color')
+ alternative.completed_count.should eql(0)
+ end
+ end
end
\ No newline at end of file
| 59 | first pass at versioning experiments for better resets | 0 | .rb | rb | mit | splitrb/split |
10071140 | <NME> helper_spec.rb
<BEF> # frozen_string_literal: true
require "spec_helper"
# TODO change some of these tests to use Rack::Test
describe Split::Helper do
include Split::Helper
let(:experiment) {
Split::ExperimentCatalog.find_or_create("link_color", "blue", "red")
}
describe "ab_test" do
it "should not raise an error when passed strings for alternatives" do
expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error
end
it "should not raise an error when passed an array for alternatives" do
expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error
end
it "should raise the appropriate error when passed integers for alternatives" do
expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError)
end
it "should raise the appropriate error when passed symbols for alternatives" do
expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError)
end
it "should not raise error when passed an array for goals" do
expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error
end
it "should not raise error when passed just one goal" do
expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error
end
it "raises an appropriate error when processing combined expirements" do
Split.configuration.experiments = {
combined_exp_1: {
alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ],
metric: :my_metric,
combined_experiments: [:combined_exp_1_sub_1]
}
}
Split::ExperimentCatalog.find_or_create("combined_exp_1")
expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do
ab_test("link_color", "blue", "red")
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should increment the participation counter after assignment to a new user" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)
end
it "should not increment the counter for an experiment that the user is not participating in" do
ab_test("link_color", "blue", "red")
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
# User shouldn't participate in this second experiment
ab_test("button_size", "small", "big")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should not increment the counter for an not started experiment" do
expect(Split.configuration).to receive(:start_manually).and_return(true)
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
expect {
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
}.not_to change { e.participant_count }
end
it "should return the given alternative for an existing user" do
expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red")
end
it "should always return the winner if one is present" do
experiment.winner = "orange"
expect(ab_test("link_color", "blue", "red")).to eq("orange")
end
it "should allow the alternative to be forced by passing it in the params" do
# ?ab_test[link_color]=blue
@params = { "ab_test" => { "link_color" => "blue" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
@params = { "ab_test" => { "link_color" => "red" } }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1)
expect(alternative).to eq("red")
end
it "should not allow an arbitrary alternative" do
@params = { "ab_test" => { "link_color" => "pink" } }
alternative = ab_test("link_color", "blue")
expect(alternative).to eq("blue")
end
it "should not store the split when a param forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do
@params = { "SPLIT_DISABLE" => "true" }
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq("blue")
alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
expect(alternative).to eq("blue")
alternative = ab_test("link_color", "red", "blue")
expect(alternative).to eq("red")
alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1)
expect(alternative).to eq("red")
end
it "should not store the split when Split generically disabled" do
@params = { "SPLIT_DISABLE" => "true" }
expect(ab_user).not_to receive(:[]=)
ab_test("link_color", "blue", "red")
end
context "when store_override is set" do
before { Split.configuration.store_override = true }
it "should store the forced alternative" do
@params = { "ab_test" => { "link_color" => "blue" } }
expect(ab_user).to receive(:[]=).with("link_color", "blue")
ab_test("link_color", "blue", "red")
end
end
context "when on_trial_choose is set" do
before { Split.configuration.on_trial_choose = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_test("link_color", "blue", "red")
end
end
it "should allow passing a block" do
alt = ab_test("link_color", "blue", "red")
ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" }
expect(ret).to eq("shared/#{alt}")
end
it "should allow the share of visitors see an alternative to be specified" do
ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })
expect(["red", "blue"]).to include(ab_user["link_color"])
end
it "should allow alternative weighting interface as a single hash" do
ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)
end
end
end
end
it "should select the correct alternatives after experiment resets" do
experiment = Split::ExperimentCatalog.find(:test_0)
experiment.reset
mock_user[experiment.key] = "test-alt"
expect(ab_user.active_experiments.size).to eq 1
expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt"
end
it "lets override existing choice" do
pending "this requires user store reset on first call not depending on whelther it is current trial"
@params = { "ab_test" => { "test_1" => "test-alt" } }
expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control"
expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
end
end
end
it "should not over-write a finished key when an experiment is on a later version" do
experiment.increment_version
ab_user = { experiment.key => "blue", experiment.finished_key => true }
finished_session = ab_user.dup
ab_test("link_color", "blue", "red")
expect(ab_user).to eq(finished_session)
end
end
describe "metadata" do
context "is defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: { "one" => "Meta1", "two" => "Meta2" }
}
}
end
it "should be passed to helper block" do
@params = { "ab_test" => { "my_experiment" => "two" } }
expect(ab_test("my_experiment")).to eq "two"
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq("Meta2")
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment")).to eq "one"
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq("Meta1")
end
end
context "is not defined" do
before do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
metadata: nil
}
}
end
it "should be passed to helper block" do
expect(ab_test("my_experiment") do |alternative, meta|
meta
end).to eq({})
end
it "should pass control metadata helper block if library disabled" do
Split.configure do |config|
config.enabled = false
end
expect(ab_test("my_experiment") do |_, meta|
meta
end).to eq({})
end
end
end
describe "ab_finished" do
context "for an experiment that the user participates in" do
before(:each) do
@experiment_name = "link_color"
@alternatives = ["blue", "red"]
@experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
@alternative_name = ab_test(@experiment_name, *@alternatives)
@previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
end
it "should increment the counter for the completed alternative" do
ab_finished(@experiment_name)
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should set experiment's finished key if reset is false" do
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should not increment the counter if reset is false and the experiment has been already finished" do
2.times { ab_finished(@experiment_name, { reset: false }) }
new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
expect(new_completion_count).to eq(@previous_completion_count + 1)
end
it "should not increment the counter for an ended experiment" do
e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
e.winner = "small"
a = ab_test("button_size", "small", "big")
expect(a).to eq("small")
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new(a, "button_size").completed_count }
end
it "should clear out the user's participation from their session" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should not clear out the users session if reset is false" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name, { reset: false })
expect(ab_user[@experiment.key]).to eq(@alternative_name)
expect(ab_user[@experiment.finished_key]).to eq(true)
end
it "should reset the users session when experiment is not versioned" do
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
it "should reset the users session when experiment is versioned" do
@experiment.increment_version
@alternative_name = ab_test(@experiment_name, *@alternatives)
expect(ab_user[@experiment.key]).to eq(@alternative_name)
ab_finished(@experiment_name)
expect(ab_user.keys).to be_empty
end
context "when on_trial_complete is set" do
before { Split.configuration.on_trial_complete = :some_method }
it "should call the method" do
expect(self).to receive(:some_method)
ab_finished(@experiment_name)
end
it "should not call the method without alternative" do
ab_user[@experiment.key] = nil
expect(self).not_to receive(:some_method)
ab_finished(@experiment_name)
end
end
end
context "for an experiment that the user is excluded from" do
before do
alternative = ab_test("link_color", "blue", "red")
expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1)
alternative = ab_test("button_size", "small", "big")
expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0)
end
it "should not increment the completed counter" do
        # The user should be participating in the link_color experiment and
        # receiving the control for button_size. Since the user is not
        # participating in the button_size experiment, finishing it should not
        # increase the completion count for that alternative.
expect {
ab_finished("button_size")
}.not_to change { Split::Alternative.new("small", "button_size").completed_count }
end
end
context "for an experiment that the user does not participate in" do
before do
Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt")
end
it "should not raise an exception" do
expect { ab_finished(:not_started_experiment) }.not_to raise_exception
end
it "should not change the user state when reset is false" do
expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])
end
it "should not change the user state when reset is true" do
expect(self).not_to receive(:reset!)
ab_finished(:not_started_experiment)
end
it "should not increment the completed counter" do
ab_finished(:not_started_experiment)
expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0)
expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0)
end
end
end
context "finished with config" do
it "passes reset option" do
Split.configuration.experiments = {
my_experiment: {
alternatives: ["one", "two"],
resettable: false,
}
}
alternative = ab_test(:my_experiment)
experiment = Split::ExperimentCatalog.find :my_experiment
ab_finished :my_experiment
expect(ab_user[experiment.key]).to eq(alternative)
expect(ab_user[experiment.finished_key]).to eq(true)
end
end
context "finished with metric name" do
before { Split.configuration.experiments = {} }
before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }
def should_finish_experiment(experiment_name, should_finish = true)
alts = Split.configuration.experiments[experiment_name][:alternatives]
experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)
alt_name = ab_user[experiment.key] = alts.first
alt = double("alternative")
expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)
expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)
if should_finish
expect(alt).to receive(:increment_completion).at_most(1).times
else
expect(alt).not_to receive(:increment_completion)
end
end
it "completes the test" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
metric: :my_metric
}
should_finish_experiment :my_experiment
ab_finished :my_metric
end
it "completes all relevant tests" do
Split.configuration.experiments = {
exp_1: {
alternatives: [ "1-1", "1-2" ],
metric: :my_metric
},
exp_2: {
alternatives: [ "2-1", "2-2" ],
metric: :another_metric
},
exp_3: {
alternatives: [ "3-1", "3-2" ],
metric: :my_metric
},
}
should_finish_experiment :exp_1
should_finish_experiment :exp_2, false
should_finish_experiment :exp_3
ab_finished :my_metric
end
it "passes reset option" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
resettable: false,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
it "passes through options" do
Split.configuration.experiments = {
my_exp: {
alternatives: ["one", "two"],
metric: :my_metric,
}
}
alternative_name = ab_test(:my_exp)
exp = Split::ExperimentCatalog.find :my_exp
ab_finished :my_metric, reset: false
expect(ab_user[exp.key]).to eq(alternative_name)
expect(ab_user[exp.finished_key]).to be_truthy
end
end
describe "conversions" do
it "should return a conversion rate for an alternative" do
alternative_name = ab_test("link_color", "blue", "red")
      previous_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(previous_conversion_rate).to eq(0.0)
      ab_finished("link_color")
      new_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(new_conversion_rate).to eq(1.0)
end
end
describe "active experiments" do
it "should show an active test" do
alternative = ab_test("def", "4", "5", "6")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show a finished test" do
alternative = ab_test("def", "4", "5", "6")
ab_finished("def", { reset: false })
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "def"
expect(active_experiments.first[1]).to eq alternative
end
it "should show an active test when an experiment is on a later version" do
experiment.reset
expect(experiment.version).to eq(1)
ab_test("link_color", "blue", "red")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "link_color"
end
it "should show versioned tests properly" do
10.times { experiment.reset }
alternative = ab_test(experiment.name, "blue", "red")
ab_finished(experiment.name, reset: false)
expect(experiment.version).to eq(10)
expect(active_experiments.count).to eq 1
expect(active_experiments).to eq({ "link_color" => alternative })
end
it "should show multiple tests" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
alternative = ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 2
expect(active_experiments["def"]).to eq alternative
expect(active_experiments["ghi"]).to eq another_alternative
end
it "should not show tests with winners" do
Split.configure do |config|
config.allow_multiple_experiments = true
end
e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6")
e.winner = "4"
ab_test("def", "4", "5", "6")
another_alternative = ab_test("ghi", "7", "8", "9")
expect(active_experiments.count).to eq 1
expect(active_experiments.first[0]).to eq "ghi"
expect(active_experiments.first[1]).to eq another_alternative
end
end
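  # `active_experiments` returns a Hash of experiment name => chosen
  # alternative, excluding experiments with winners. A hedged usage sketch
  # (`logger` is hypothetical):
  #
  #   active_experiments # => { "def" => "5", "ghi" => "8" }
  #   active_experiments.each { |name, alt| logger.info("#{name}=#{alt}") }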
describe "when user is a robot" do
before(:each) do
@request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)")
end
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not create a experiment" do
ab_test("link_color", "blue", "red")
expect(Split::Experiment.new("link_color")).to be_a_new_record
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when providing custom ignore logic" do
context "using a proc to configure custom logic" do
before(:each) do
Split.configure do |c|
c.ignore_filter = proc { |request| true } # ignore everything
end
end
it "ignores the ab_test" do
ab_test("link_color", "blue", "red")
red_count = Split::Alternative.new("red", "link_color").participant_count
blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((red_count + blue_count)).to be(0)
end
end
end
shared_examples_for "a disabled test" do
describe "ab_test" do
it "should return the control" do
alternative = ab_test("link_color", "blue", "red")
expect(alternative).to eq experiment.control.name
end
it "should not increment the participation count" do
previous_red_count = Split::Alternative.new("red", "link_color").participant_count
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count
ab_test("link_color", "blue", "red")
new_red_count = Split::Alternative.new("red", "link_color").participant_count
new_blue_count = Split::Alternative.new("blue", "link_color").participant_count
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)
end
end
describe "finished" do
it "should not increment the completed count" do
alternative_name = ab_test("link_color", "blue", "red")
previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
ab_finished("link_color")
new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count
expect(new_completion_count).to eq(previous_completion_count)
end
end
end
describe "when ip address is ignored" do
context "individually" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.130")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it_behaves_like "a disabled test"
end
context "for a range" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.129")
Split.configure do |c|
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "using both a range and a specific value" do
before(:each) do
@request = OpenStruct.new(ip: "81.19.48.128")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
end
end
it_behaves_like "a disabled test"
end
context "when ignored other address" do
before do
@request = OpenStruct.new(ip: "1.1.1.1")
Split.configure do |c|
c.ignore_ip_addresses << "81.19.48.130"
end
end
it "works as usual" do
alternative_name = ab_test("link_color", "red", "blue")
expect {
ab_finished("link_color")
}.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1)
end
end
end
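  # A hedged configuration sketch of the IP filtering exercised above:
  # `ignore_ip_addresses` accepts exact strings and Regexps, matched against
  # `request.ip` (the addresses below are illustrative):
  #
  #   Split.configure do |c|
  #     c.ignore_ip_addresses << "81.19.48.130"        # a single address
  #     c.ignore_ip_addresses << /^10\.0\.\d+\.\d+$/   # an internal range
  #   end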
describe "when user is previewing" do
before(:each) do
@request = OpenStruct.new(headers: { "x-purpose" => "preview" })
end
it_behaves_like "a disabled test"
end
describe "versioned experiments" do
it "should use version zero if no version is present" do
alternative_name = ab_test("link_color", "blue", "red")
expect(experiment.version).to eq(0)
expect(ab_user["link_color"]).to eq(alternative_name)
end
it "should save the version of the experiment to the session" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
end
it "should load the experiment even if the version is not 0" do
experiment.reset
expect(experiment.version).to eq(1)
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(alternative_name)
return_alternative_name = ab_test("link_color", "blue", "red")
expect(return_alternative_name).to eq(alternative_name)
end
it "should reset the session of a user on an older version of the experiment" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
expect(new_alternative.participant_count).to eq(1)
end
it "should cleanup old versions of experiments from the session" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(1)
experiment.reset
expect(experiment.version).to eq(1)
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.participant_count).to eq(0)
new_alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color:1"]).to eq(new_alternative_name)
end
it "should only count completion of users on the current version" do
alternative_name = ab_test("link_color", "blue", "red")
expect(ab_user["link_color"]).to eq(alternative_name)
Split::Alternative.new(alternative_name, "link_color")
experiment.reset
expect(experiment.version).to eq(1)
ab_finished("link_color")
alternative = Split::Alternative.new(alternative_name, "link_color")
expect(alternative.completed_count).to eq(0)
end
end
context "when redis is not available" do
before(:each) do
expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)
end
context "and db_failover config option is turned off" do
before(:each) do
Split.configure do |config|
config.db_failover = false
end
end
describe "ab_test" do
it "should raise an exception" do
expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "finished" do
it "should raise an exception" do
expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED)
end
end
describe "disable split testing" do
before(:each) do
Split.configure do |config|
config.enabled = false
end
end
it "should not attempt to connect to redis" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should return control variable" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect { ab_finished("link_color") }.not_to raise_error
end
end
end
context "and db_failover config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover = true
end
end
describe "ab_test" do
it "should not raise an exception" do
expect { ab_test("link_color", "blue", "red") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_test("link_color", "blue", "red")
end
it "should always use first alternative" do
expect(ab_test("link_color", "blue", "red")).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/blue")
end
context "and db_failover_allow_parameter_override config option is turned on" do
before(:each) do
Split.configure do |config|
config.db_failover_allow_parameter_override = true
end
end
context "and given an override parameter" do
it "should use given override instead of the first alternative" do
@params = { "ab_test" => { "link_color" => "red" } }
expect(ab_test("link_color", "blue", "red")).to eq("red")
expect(ab_test("link_color", "blue", "red", "green")).to eq("red")
expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red")
expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red")
expect(ab_test("link_color", "blue", "red") do |alternative|
"shared/#{alternative}"
end).to eq("shared/red")
end
end
end
context "and preloaded config given" do
before do
Split.configuration.experiments[:link_color] = {
alternatives: [ "blue", "red" ],
}
end
it "uses first alternative" do
expect(ab_test(:link_color)).to eq("blue")
end
end
end
describe "finished" do
it "should not raise an exception" do
expect { ab_finished("link_color") }.not_to raise_error
end
it "should call db_failover_on_db_error proc with error as parameter" do
Split.configure do |config|
config.db_failover_on_db_error = proc do |error|
expect(error).to be_a(Errno::ECONNREFUSED)
end
end
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original
ab_finished("link_color")
end
end
end
end
context "with preloaded config" do
before { Split.configuration.experiments = {} }
it "pulls options from config file" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
ab_test :my_experiment
expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
end
it "can be called multiple times" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: ["goal1", "goal2"]
}
5.times { ab_test :my_experiment }
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
expect(experiment.goals).to eq([ "goal1", "goal2" ])
expect(experiment.participant_count).to eq(1)
end
it "accepts multiple goals" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ],
goals: [ "goal1", "goal2", "goal3" ]
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
end
it "allow specifying goals to be optional" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "other_opt" ]
}
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.goals).to eq([])
end
it "accepts multiple alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [ "control_opt", "second_opt", "third_opt" ],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
end
it "accepts probability on alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 67 },
{ name: "second_opt", percent: 10 },
{ name: "third_opt", percent: 23 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
end
it "accepts probability on some alternatives" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt", percent: 34 },
"second_opt",
{ name: "third_opt", percent: 23 },
"fourth_opt",
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "allows name param without probability" do
Split.configuration.experiments[:my_experiment] = {
alternatives: [
{ name: "control_opt" },
"second_opt",
{ name: "third_opt", percent: 64 },
],
}
ab_test :my_experiment
experiment = Split::Experiment.new(:my_experiment)
names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
end
it "fails gracefully if config is missing experiment" do
Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
end
it "fails gracefully if config is missing" do
expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
end
it "fails gracefully if config is missing alternatives" do
Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
expect { ab_test :my_experiment }.to raise_error(NoMethodError)
end
end
it "should handle multiple experiments correctly" do
experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
ab_test("link_color", "blue", "red")
ab_test("link_color2", "blue", "red")
ab_finished("link_color2")
experiment2.alternatives.each do |alt|
expect(alt.unfinished_count).to eq(0)
end
end
context "with goals" do
before do
@experiment = { "link_color" => ["purchase", "refund"] }
@alternatives = ["blue", "red"]
@experiment_name, @goals = normalize_metric(@experiment)
@goal1 = @goals[0]
@goal2 = @goals[1]
end
it "should normalize experiment" do
expect(@experiment_name).to eq("link_color")
expect(@goals).to eq(["purchase", "refund"])
end
describe "ab_test" do
it "should allow experiment goals interface as a single hash" do
ab_test(@experiment, *@alternatives)
experiment = Split::ExperimentCatalog.find("link_color")
expect(experiment.goals).to eq(["purchase", "refund"])
end
end
describe "ab_finished" do
before do
@alternative_name = ab_test(@experiment, *@alternatives)
end
it "should increment the counter for the specified-goal completed alternative" do
expect { ab_finished({ "link_color" => ["purchase"] }) }
.to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
.and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
end
end
end
end
<MSG> first pass at versioning experiments for better resets
<DFF> @@ -185,4 +185,63 @@ describe Split::Helper do
end
end
end
+
+ describe 'versioned experiments' do
+ it "should use version zero if no version is present" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ experiment.version.should eql(0)
+ session[:split].should eql({'link_color' => alternative_name})
+ end
+
+ it "should save the version of the experiment to the session" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ experiment.reset
+ experiment.version.should eql(1)
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split].should eql({'link_color:1' => alternative_name})
+ end
+
+ it "should load the experiment even if the version is not 0" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ experiment.reset
+ experiment.version.should eql(1)
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split].should eql({'link_color:1' => alternative_name})
+ return_alternative_name = ab_test('link_color', 'blue', 'red')
+ return_alternative_name.should eql(alternative_name)
+ end
+
+ it "should reset the session of a user on an older version of the experiment" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split].should eql({'link_color' => alternative_name})
+ alternative = Split::Alternative.find(alternative_name, 'link_color')
+ alternative.participant_count.should eql(1)
+
+ experiment.reset
+ experiment.version.should eql(1)
+ alternative = Split::Alternative.find(alternative_name, 'link_color')
+ alternative.participant_count.should eql(0)
+
+ new_alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split]['link_color:1'].should eql(new_alternative_name)
+ new_alternative = Split::Alternative.find(new_alternative_name, 'link_color')
+ new_alternative.participant_count.should eql(1)
+ end
+
+ it "should only count completion of users on the current version" do
+ experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
+ alternative_name = ab_test('link_color', 'blue', 'red')
+ session[:split].should eql({'link_color' => alternative_name})
+ alternative = Split::Alternative.find(alternative_name, 'link_color')
+
+ experiment.reset
+ experiment.version.should eql(1)
+
+ finished('link_color')
+ alternative = Split::Alternative.find(alternative_name, 'link_color')
+ alternative.completed_count.should eql(0)
+ end
+ end
end
\ No newline at end of file
| 59 | first pass at versioning experiments for better resets | 0 | .rb | rb | mit | splitrb/split |
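The record above captures the commit that introduced versioned experiment keys: after an experiment is reset, a participant's assignment is stored under "name:version" rather than the bare experiment name, so assignments made before the reset are ignored. A minimal illustrative sketch of that key scheme — the helper below is hypothetical, written only to show the convention the spec asserts (ab_user["link_color"] at version 0, ab_user["link_color:1"] after one reset):

    # Hypothetical helper mirroring the session-key scheme tested above:
    # version 0 keeps the bare experiment name, later versions append ":N".
    def versioned_key(experiment_name, version)
      version.zero? ? experiment_name : "#{experiment_name}:#{version}"
    end

    versioned_key("link_color", 0) # => "link_color"
    versioned_key("link_color", 1) # => "link_color:1"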
10071141 | <NME> user_spec.rb
<BEF> # frozen_string_literal: true
require "spec_helper"
require "split/experiment_catalog"
require "split/experiment"
require "split/user"
describe Split::User do
let(:user_keys) { { "link_color" => "blue" } }
let(:context) { double(session: { split: user_keys }) }
let(:experiment) { Split::Experiment.new("link_color") }
before(:each) do
@subject = described_class.new(context)
end
it "delegates methods correctly" do
expect(@subject["link_color"]).to eq(@subject.user["link_color"])
end
context "#cleanup_old_versions!" do
let(:experiment_version) { "#{experiment.name}:1" }
let(:second_experiment_version) { "#{experiment.name}_another:1" }
let(:third_experiment_version) { "variation_of_#{experiment.name}:1" }
let(:user_keys) do
{
experiment_version => "blue",
second_experiment_version => "red",
third_experiment_version => "yellow"
}
end
before(:each) { @subject.cleanup_old_versions!(experiment) }
it "removes key if old experiment is found" do
expect(@subject.keys).not_to include(experiment_version)
end
it "does not remove other keys" do
expect(@subject.keys).to include(second_experiment_version, third_experiment_version)
end
end
context "#cleanup_old_experiments!" do
it "removes key if experiment is not found" do
@subject.cleanup_old_experiments!
expect(@subject.keys).to be_empty
end
it "removes key if experiment has a winner" do
allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment)
allow(experiment).to receive(:start_time).and_return(Date.today)
allow(experiment).to receive(:has_winner?).and_return(true)
@subject.cleanup_old_experiments!
expect(@subject.keys).to be_empty
end
it "removes key if experiment has not started yet" do
allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment)
      @subject.cleanup_old_experiments!
      expect(@subject.keys).to be_empty
    end
context "when already cleaned up" do
before do
@subject.cleanup_old_experiments!
end
it "does not clean up again" do
expect(@subject).to_not receive(:keys_without_finished)
@subject.cleanup_old_experiments!
end
end
end
context "allows user to be loaded from adapter" do
it "loads user from adapter (RedisAdapter)" do
user = Split::Persistence::RedisAdapter.new(nil, 112233)
user["foo"] = "bar"
ab_user = Split::User.find(112233, :redis)
expect(ab_user["foo"]).to eql("bar")
end
it "returns nil if adapter does not implement a finder method" do
ab_user = Split::User.find(112233, :dual_adapter)
expect(ab_user).to be_nil
end
end
context "instantiated with custom adapter" do
let(:custom_adapter) { double(:persistence_adapter) }
before do
@subject = described_class.new(context, custom_adapter)
end
it "sets user to the custom adapter" do
expect(@subject.user).to eq(custom_adapter)
end
end
end
<MSG> Optional custom persistence adapter
<DFF> @@ -60,4 +60,17 @@ describe Split::User do
end
end
end
+
+ context "instantiated with custom adapter" do
+ let(:custom_adapter) { double(:persistence_adapter) }
+
+ before do
+ @subject = described_class.new(context, custom_adapter)
+ end
+
+ it "sets user to the custom adapter" do
+ expect(@subject.user).to eq(custom_adapter)
+ end
+ end
+
end
| 13 | Optional custom persistence adapter | 0 | .rb | rb | mit | splitrb/split |
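The commit above makes the persistence adapter injectable: Split::User.new accepts an optional second argument that replaces the adapter normally derived from the request context. A sketch of how that could be used — HashAdapter is a hypothetical stand-in for the gem's built-in Redis/session/cookie adapters, exposing the same small read/write surface the spec relies on:

    require "split"

    # Hypothetical in-memory adapter with the []/[]=/keys/delete surface
    # that Split's persistence adapters provide.
    class HashAdapter
      def initialize
        @store = {}
      end

      def [](key)
        @store[key]
      end

      def []=(key, value)
        @store[key] = value
      end

      def keys
        @store.keys
      end

      def delete(key)
        @store.delete(key)
      end
    end

    user = Split::User.new(nil, HashAdapter.new) # explicit adapter wins over the default
    user["link_color"] = "blue"
    user["link_color"] # => "blue"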
10071144 | <NME> spec_helper.rb
<BEF> # frozen_string_literal: true
ENV["RACK_ENV"] = "test"
require "rubygems"
require "bundler/setup"
require "simplecov"
SimpleCov.start
require "split"
require "ostruct"
require "yaml"
Dir["./spec/support/*.rb"].each { |f| require f }
module GlobalSharedContext
extend RSpec::SharedContext
let(:mock_user) { Split::User.new(double(session: {})) }
before(:each) do
Split.redis = Redis.new
Split.redis.select(10)
Split.redis.flushdb
Split::ExperimentCatalog.clear_cache
@ab_user = mock_user
params = nil
end
end
RSpec.configure do |config|
config.order = "random"
config.include GlobalSharedContext
config.raise_errors_for_deprecations!
end
def session
@session ||= {}
end
def params
@params ||= {}
end
def request(ua = "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; de-de) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27")
@request ||= begin
r = OpenStruct.new
r.user_agent = ua
r.ip = "192.168.1.1"
r
end
end
<MSG> Added Split::Cache with config and spec, and wired into a few places
<DFF> @@ -22,7 +22,7 @@ module GlobalSharedContext
Split.redis = Redis.new
Split.redis.select(10)
Split.redis.flushdb
- Split::ExperimentCatalog.clear_cache
+ Split::Cache.clear
@ab_user = mock_user
params = nil
end
| 1 | Added Split::Cache with config and spec, and wired into a few places | 1 | .rb | rb | mit | splitrb/split |
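The one-line change above swaps the catalog-specific cache reset for the then-new Split::Cache.clear, so each example starts with a cold cache. The fetch/clear calls below mirror how the suite uses that cache, but the implementation shown is an assumed sketch of the underlying memoization pattern, not the gem's actual class:

    # Assumed shape of a namespaced memoization cache: compute once per
    # (namespace, key) pair, then serve the stored value until cleared.
    class MiniCache
      @store = {}

      class << self
        def fetch(namespace, key)
          @store[[namespace, key]] ||= yield
        end

        def clear
          @store = {}
        end
      end
    end

    MiniCache.fetch(:experiments, "link_color") { "computed" }      # => "computed"
    MiniCache.fetch(:experiments, "link_color") { "not evaluated" } # => "computed"
    MiniCache.clear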
10071147 | <NME> metric.rb
<BEF> # frozen_string_literal: true
module Split
class Metric
attr_accessor :name
attr_accessor :experiments
def initialize(attrs = {})
attrs.each do |key, value|
if self.respond_to?("#{key}=")
self.send("#{key}=", value)
end
end
end
def self.load_from_redis(name)
metric = Split.redis.hget(:metrics, name)
if metric
experiment_names = metric.split(",")
experiments = experiment_names.collect do |experiment_name|
Split::ExperimentCatalog.find(experiment_name)
end
Split::Metric.new(name: name, experiments: experiments)
else
nil
end
end
def self.load_from_configuration(name)
metrics = Split.configuration.metrics
if metrics && metrics[name]
Split::Metric.new(experiments: metrics[name], name: name)
else
nil
end
end
def self.find(name)
      name = name.intern if name.is_a?(String)
      metric = load_from_configuration(name)
      metric = load_from_redis(name) if metric.nil?
      metric
end
def self.possible_experiments(metric_name)
experiments = []
metric = Split::Metric.find(metric_name)
if metric
experiments << metric.experiments
end
experiment = Split::ExperimentCatalog.find(metric_name)
if experiment
experiments << experiment
end
experiments.flatten
end
def save
Split.redis.hset(:metrics, name, experiments.map(&:name).join(","))
end
def complete!
experiments.each do |experiment|
experiment.complete!
end
end
def self.normalize_metric(label)
if Hash === label
metric_name = label.keys.first
goals = label.values.first
else
metric_name = label
goals = []
end
return metric_name, goals
end
end
end
<MSG> Add find_or_create and all methods to Metric
<DFF> @@ -42,6 +42,25 @@ module Split
metric
end
+ def self.find_or_create(attrs)
+ metric = find(attrs[:name])
+ unless metric
+ metric = new(attrs)
+ metric.save
+ end
+ metric
+ end
+
+ def self.all
+ redis_metrics = Split.redis.hgetall(:metrics).collect do |key, value|
+ find(key)
+ end
+ configuration_metrics = Split.configuration.metrics.collect do |key, value|
+ new(name: key, experiments: value)
+ end
+ redis_metrics | configuration_metrics
+ end
+
def self.possible_experiments(metric_name)
experiments = []
metric = Split::Metric.find(metric_name)
@@ -78,4 +97,4 @@ module Split
return metric_name, goals
end
end
-end
\ No newline at end of file
+end
| 20 | Add find_or_create and all methods to Metric | 1 | .rb | rb | mit | splitrb/split |
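The diff above adds two class-level entry points: Metric.find_or_create, which persists a metric to the :metrics Redis hash when lookup fails, and Metric.all, which unions Redis-backed metrics with those from the configuration. A usage sketch, assuming a reachable Redis instance and that Split.configuration.metrics yields a hash; the metric name here is illustrative:

    require "split"

    metric = Split::Metric.find_or_create(name: :checkout, experiments: [])
    metric.name                    # => :checkout, now stored in the :metrics Redis hash

    Split::Metric.all.map(&:name)  # Redis-backed metrics unioned with configured ones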