repo
string | commit
string | message
string | diff
string |
---|---|---|---|
smtlaissezfaire/hopcroft
|
6e06b430085ddd4d4d789b72305fbb47db776a37
|
include instead of extend'ing Identifiable
|
diff --git a/lib/hopcroft/machine/identifiable.rb b/lib/hopcroft/machine/identifiable.rb
index 767b26a..85bd6a3 100644
--- a/lib/hopcroft/machine/identifiable.rb
+++ b/lib/hopcroft/machine/identifiable.rb
@@ -1,38 +1,40 @@
module Hopcroft
module Machine
module Identifiable
class << self
def extended(mod)
mod.extend ClassMethods
mod.class_eval do
include InstanceMethods
end
end
+
+ alias_method :included, :extended
end
module ClassMethods
def reset_counter!
@counter = 1
end
def next_counter
returning counter do |c|
@counter += 1
end
end
def counter
@counter ||= 1
end
end
module InstanceMethods
def track_id
@id = self.class.next_counter
end
attr_reader :id
end
end
end
end
\ No newline at end of file
diff --git a/lib/hopcroft/machine/state.rb b/lib/hopcroft/machine/state.rb
index 602a88c..16681a2 100644
--- a/lib/hopcroft/machine/state.rb
+++ b/lib/hopcroft/machine/state.rb
@@ -1,128 +1,128 @@
module Hopcroft
module Machine
class State
- extend Identifiable
+ include Identifiable
def initialize(options={})
track_id
@start_state = options[:start_state] if options.has_key?(:start_state)
@final_state = options[:final] if options.has_key?(:final)
assign_name(options)
end
attr_reader :name
alias_method :to_s, :name
def inspect
"#{name} {start: #{start_state?}, final: #{final_state?}, transitions: #{transitions.size}}"
end
def transitions
@transitions ||= []
end
attr_writer :transitions
def epsilon_transitions
transitions.select { |t| t.epsilon_transition? }
end
# Accepts the following hash arguments:
#
# :machine => m (optional). Links current state to start state of machine
# given with an epsilon transition.
# :start_state => true | false. Make the state a start state. Defaults to false
# :final => true | false. Make the state a final state. Defaults to false
# :state => a_state (if none passed, a new one is constructed)
# :symbol => Symbol to transition to.
# :epsilon => An Epsilon Transition instead of a regular symbol transition
# :any => An any symbol transition. Equivalent to a regex '.'
#
def add_transition(args={})
args[:start_state] = false unless args.has_key?(:start_state)
if args[:machine]
machine = args[:machine]
args[:state] = machine.start_state
args[:state].start_state = false
args[:epsilon] = true
else
args[:state] ||= State.new(args)
end
returning args[:state] do |state|
transitions << transition_for(args, state)
yield(state) if block_given?
state
end
end
def transition_for(args, state)
if args[:epsilon]
EpsilonTransition.new(state)
elsif args[:any]
AnyCharTransition.new(state)
else
Transition.new(args[:symbol], state)
end
end
def start_state?
@start_state.equal?(false) ? false : true
end
attr_writer :start_state
def final_state?
@final_state ? true : false
end
alias_method :final?, :final_state?
attr_writer :final_state
def substates(excluded_states = [])
returning [] do |list|
follow_states.each do |state|
unless excluded_states.include?(state)
excluded_states << state
list.push state
list.push *state.substates(excluded_states)
end
end
end
end
def follow_states(excluded_states = [])
transitions.map { |t| t.state }.reject { |s| excluded_states.include?(s) }
end
def add_transitions_to_table(table)
transitions.each do |transition|
to = transition.to
unless table.has_state_change?(self, to, transition.symbol)
table.add_state_change(self, to, transition.symbol)
transition.to.add_transitions_to_table(table)
end
end
end
def deep_clone
returning clone do |c|
c.transitions = transitions.map { |t| t.deep_clone }
end
end
private
def assign_name(options)
@name = options[:name] ? options[:name] : "State #{@id}"
end
end
end
end
|
smtlaissezfaire/hopcroft
|
d1d8b8fa343a57165fa587eb3aaf0b847909b795
|
Move unique id tracking into a module
|
diff --git a/lib/hopcroft/machine.rb b/lib/hopcroft/machine.rb
index f09720e..47b3b37 100644
--- a/lib/hopcroft/machine.rb
+++ b/lib/hopcroft/machine.rb
@@ -1,17 +1,18 @@
module Hopcroft
module Machine
extend Using
+ using :Identifiable
using :TransitionTable
using :NfaTransitionTable
using :DfaTransitionTable
using :State
using :Transition
using :StateMachine
using :EpsilonTransition
using :AnyCharTransition
using :TableConverter
using :TableDisplayer
using :StateMachineHelpers
end
end
diff --git a/lib/hopcroft/machine/identifiable.rb b/lib/hopcroft/machine/identifiable.rb
new file mode 100644
index 0000000..767b26a
--- /dev/null
+++ b/lib/hopcroft/machine/identifiable.rb
@@ -0,0 +1,38 @@
+module Hopcroft
+ module Machine
+ module Identifiable
+ class << self
+ def extended(mod)
+ mod.extend ClassMethods
+ mod.class_eval do
+ include InstanceMethods
+ end
+ end
+ end
+
+ module ClassMethods
+ def reset_counter!
+ @counter = 1
+ end
+
+ def next_counter
+ returning counter do |c|
+ @counter += 1
+ end
+ end
+
+ def counter
+ @counter ||= 1
+ end
+ end
+
+ module InstanceMethods
+ def track_id
+ @id = self.class.next_counter
+ end
+
+ attr_reader :id
+ end
+ end
+ end
+end
\ No newline at end of file
diff --git a/lib/hopcroft/machine/state.rb b/lib/hopcroft/machine/state.rb
index b4399d5..602a88c 100644
--- a/lib/hopcroft/machine/state.rb
+++ b/lib/hopcroft/machine/state.rb
@@ -1,141 +1,128 @@
module Hopcroft
module Machine
class State
- class << self
- def reset_counter!
- @counter = 1
- end
-
- def next_counter
- returning counter do |c|
- @counter += 1
- end
- end
-
- def counter
- @counter ||= 1
- end
- end
+ extend Identifiable
def initialize(options={})
+ track_id
+
@start_state = options[:start_state] if options.has_key?(:start_state)
@final_state = options[:final] if options.has_key?(:final)
- @id = self.class.next_counter
+
assign_name(options)
end
attr_reader :name
- attr_reader :id
alias_method :to_s, :name
def inspect
"#{name} {start: #{start_state?}, final: #{final_state?}, transitions: #{transitions.size}}"
end
def transitions
@transitions ||= []
end
attr_writer :transitions
def epsilon_transitions
transitions.select { |t| t.epsilon_transition? }
end
# Accepts the following hash arguments:
#
# :machine => m (optional). Links current state to start state of machine
# given with an epsilon transition.
# :start_state => true | false. Make the state a start state. Defaults to false
# :final => true | false. Make the state a final state. Defaults to false
# :state => a_state (if none passed, a new one is constructed)
# :symbol => Symbol to transition to.
# :epsilon => An Epsilon Transition instead of a regular symbol transition
# :any => An any symbol transition. Equivalent to a regex '.'
#
def add_transition(args={})
args[:start_state] = false unless args.has_key?(:start_state)
if args[:machine]
machine = args[:machine]
args[:state] = machine.start_state
args[:state].start_state = false
args[:epsilon] = true
else
args[:state] ||= State.new(args)
end
returning args[:state] do |state|
transitions << transition_for(args, state)
yield(state) if block_given?
state
end
end
def transition_for(args, state)
if args[:epsilon]
EpsilonTransition.new(state)
elsif args[:any]
AnyCharTransition.new(state)
else
Transition.new(args[:symbol], state)
end
end
def start_state?
@start_state.equal?(false) ? false : true
end
attr_writer :start_state
def final_state?
@final_state ? true : false
end
alias_method :final?, :final_state?
attr_writer :final_state
def substates(excluded_states = [])
returning [] do |list|
follow_states.each do |state|
unless excluded_states.include?(state)
excluded_states << state
list.push state
list.push *state.substates(excluded_states)
end
end
end
end
def follow_states(excluded_states = [])
transitions.map { |t| t.state }.reject { |s| excluded_states.include?(s) }
end
def add_transitions_to_table(table)
transitions.each do |transition|
to = transition.to
unless table.has_state_change?(self, to, transition.symbol)
table.add_state_change(self, to, transition.symbol)
transition.to.add_transitions_to_table(table)
end
end
end
def deep_clone
returning clone do |c|
c.transitions = transitions.map { |t| t.deep_clone }
end
end
private
def assign_name(options)
@name = options[:name] ? options[:name] : "State #{@id}"
end
end
end
end
|
smtlaissezfaire/hopcroft
|
b886eba1281094be4656ff10a86a04213230b59e
|
Don't add the same state twice when calculating epsilon_closure. Don't infinitely recur.
|
diff --git a/lib/hopcroft/machine/state_machine_helpers.rb b/lib/hopcroft/machine/state_machine_helpers.rb
index a566c6c..bc35ce9 100644
--- a/lib/hopcroft/machine/state_machine_helpers.rb
+++ b/lib/hopcroft/machine/state_machine_helpers.rb
@@ -1,25 +1,26 @@
module Hopcroft
module Machine
module StateMachineHelpers
def epsilon_closure(state_list)
returning [] do |return_list|
state_list.each do |state|
return_list.concat(epsilon_closure_for_state(state))
end
end
end
private
- def epsilon_closure_for_state(state)
+ def epsilon_closure_for_state(state, seen_states = [])
returning [] do |set|
- set << state
- state.epsilon_transitions.each do |transition|
- set << transition.state
- set.concat(epsilon_closure_for_state(transition.state))
+ if !seen_states.include?(state)
+ set << state
+ state.epsilon_transitions.each do |transition|
+ set.concat(epsilon_closure_for_state(transition.state, seen_states << state))
+ end
end
end
end
end
end
end
diff --git a/spec/hopcoft/machine/state_machine_helpers_spec.rb b/spec/hopcoft/machine/state_machine_helpers_spec.rb
index 3530641..2230715 100644
--- a/spec/hopcoft/machine/state_machine_helpers_spec.rb
+++ b/spec/hopcoft/machine/state_machine_helpers_spec.rb
@@ -1,67 +1,83 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Machine
describe StateMachineHelpers do
before do
@obj = Object.new
@obj.extend StateMachineHelpers
end
describe "epsilon_closure" do
it "should be an empty list when given an empty list" do
@obj.epsilon_closure([]).should == []
end
it "should return the state when the state has no transitions" do
state = State.new
@obj.epsilon_closure([state]).should == [state]
end
it "should return an epsilon closure target" do
state1 = State.new
state2 = State.new
state1.add_transition :state => state2, :epsilon => true
@obj.epsilon_closure([state1]).should include(state2)
end
it "should not return a target with a state1 => state2 on a regular transition symbol" do
state1 = State.new
state2 = State.new
state1.add_transition :symbol => :sym, :state => state2
@obj.epsilon_closure([state1]).should_not include(state2)
end
it "should follow epsilon targets from several states" do
state1 = State.new
state2 = State.new
state1.add_transition :state => state2, :epsilon => true
state3 = State.new
state4 = State.new
state3.add_transition :state => state4, :epsilon => true
@obj.epsilon_closure([state1, state3]).should include(state2)
@obj.epsilon_closure([state1, state3]).should include(state4)
end
it "should find multiple levels of epsilon transitions" do
state1 = State.new
state2 = State.new
state3 = State.new
state1.add_transition :state => state2, :epsilon => true
state2.add_transition :state => state3, :epsilon => true
@obj.epsilon_closure([state1]).should include(state2)
@obj.epsilon_closure([state1]).should include(state3)
end
+
+ it "should not recur infinitely" do
+ state = State.new
+
+ state.add_transition :state => state, :epsilon => true
+
+ @obj.epsilon_closure([state]).should include(state)
+ end
+
+ it "should not include the same state twice" do
+ state = State.new
+
+ state.add_transition :state => state, :epsilon => true
+
+ @obj.epsilon_closure([state]).should == [state]
+ end
end
end
end
end
|
smtlaissezfaire/hopcroft
|
3aa8ee5b043adeffc54a06905757d1d53578a539
|
Add epsilon-closure function
|
diff --git a/lib/hopcroft/machine.rb b/lib/hopcroft/machine.rb
index 35c207f..f09720e 100644
--- a/lib/hopcroft/machine.rb
+++ b/lib/hopcroft/machine.rb
@@ -1,16 +1,17 @@
module Hopcroft
module Machine
extend Using
using :TransitionTable
using :NfaTransitionTable
using :DfaTransitionTable
using :State
using :Transition
using :StateMachine
using :EpsilonTransition
using :AnyCharTransition
using :TableConverter
using :TableDisplayer
+ using :StateMachineHelpers
end
end
diff --git a/lib/hopcroft/machine/state.rb b/lib/hopcroft/machine/state.rb
index cbe21f9..b4399d5 100644
--- a/lib/hopcroft/machine/state.rb
+++ b/lib/hopcroft/machine/state.rb
@@ -1,137 +1,141 @@
module Hopcroft
module Machine
class State
class << self
def reset_counter!
@counter = 1
end
def next_counter
returning counter do |c|
@counter += 1
end
end
def counter
@counter ||= 1
end
end
def initialize(options={})
@start_state = options[:start_state] if options.has_key?(:start_state)
@final_state = options[:final] if options.has_key?(:final)
@id = self.class.next_counter
assign_name(options)
end
attr_reader :name
attr_reader :id
alias_method :to_s, :name
def inspect
"#{name} {start: #{start_state?}, final: #{final_state?}, transitions: #{transitions.size}}"
end
def transitions
@transitions ||= []
end
attr_writer :transitions
+
+ def epsilon_transitions
+ transitions.select { |t| t.epsilon_transition? }
+ end
# Accepts the following hash arguments:
#
# :machine => m (optional). Links current state to start state of machine
# given with an epsilon transition.
# :start_state => true | false. Make the state a start state. Defaults to false
# :final => true | false. Make the state a final state. Defaults to false
# :state => a_state (if none passed, a new one is constructed)
# :symbol => Symbol to transition to.
# :epsilon => An Epsilon Transition instead of a regular symbol transition
# :any => An any symbol transition. Equivalent to a regex '.'
#
def add_transition(args={})
args[:start_state] = false unless args.has_key?(:start_state)
if args[:machine]
machine = args[:machine]
args[:state] = machine.start_state
args[:state].start_state = false
args[:epsilon] = true
else
args[:state] ||= State.new(args)
end
returning args[:state] do |state|
transitions << transition_for(args, state)
yield(state) if block_given?
state
end
end
def transition_for(args, state)
if args[:epsilon]
EpsilonTransition.new(state)
elsif args[:any]
AnyCharTransition.new(state)
else
Transition.new(args[:symbol], state)
end
end
def start_state?
@start_state.equal?(false) ? false : true
end
attr_writer :start_state
def final_state?
@final_state ? true : false
end
alias_method :final?, :final_state?
attr_writer :final_state
def substates(excluded_states = [])
returning [] do |list|
follow_states.each do |state|
unless excluded_states.include?(state)
excluded_states << state
list.push state
list.push *state.substates(excluded_states)
end
end
end
end
def follow_states(excluded_states = [])
transitions.map { |t| t.state }.reject { |s| excluded_states.include?(s) }
end
def add_transitions_to_table(table)
transitions.each do |transition|
to = transition.to
unless table.has_state_change?(self, to, transition.symbol)
table.add_state_change(self, to, transition.symbol)
transition.to.add_transitions_to_table(table)
end
end
end
def deep_clone
returning clone do |c|
c.transitions = transitions.map { |t| t.deep_clone }
end
end
private
def assign_name(options)
@name = options[:name] ? options[:name] : "State #{@id}"
end
end
end
end
diff --git a/lib/hopcroft/machine/state_machine_helpers.rb b/lib/hopcroft/machine/state_machine_helpers.rb
new file mode 100644
index 0000000..a566c6c
--- /dev/null
+++ b/lib/hopcroft/machine/state_machine_helpers.rb
@@ -0,0 +1,25 @@
+module Hopcroft
+ module Machine
+ module StateMachineHelpers
+ def epsilon_closure(state_list)
+ returning [] do |return_list|
+ state_list.each do |state|
+ return_list.concat(epsilon_closure_for_state(state))
+ end
+ end
+ end
+
+ private
+
+ def epsilon_closure_for_state(state)
+ returning [] do |set|
+ set << state
+ state.epsilon_transitions.each do |transition|
+ set << transition.state
+ set.concat(epsilon_closure_for_state(transition.state))
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/machine/transition.rb b/lib/hopcroft/machine/transition.rb
index 3894afc..2dac679 100644
--- a/lib/hopcroft/machine/transition.rb
+++ b/lib/hopcroft/machine/transition.rb
@@ -1,18 +1,22 @@
module Hopcroft
module Machine
class Transition
def initialize(symbol, state)
@symbol = symbol.respond_to?(:to_sym) ? symbol.to_sym : symbol
@state = state
end
attr_reader :symbol
attr_reader :state
alias_method :to, :state
def deep_clone
self.class.new(symbol, state.deep_clone)
end
+
+ def epsilon_transition?
+ symbol == EpsilonTransition
+ end
end
end
end
diff --git a/spec/hopcoft/machine/state_machine_helpers_spec.rb b/spec/hopcoft/machine/state_machine_helpers_spec.rb
new file mode 100644
index 0000000..3530641
--- /dev/null
+++ b/spec/hopcoft/machine/state_machine_helpers_spec.rb
@@ -0,0 +1,67 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Machine
+ describe StateMachineHelpers do
+ before do
+ @obj = Object.new
+ @obj.extend StateMachineHelpers
+ end
+
+ describe "epsilon_closure" do
+ it "should be an empty list when given an empty list" do
+ @obj.epsilon_closure([]).should == []
+ end
+
+ it "should return the state when the state has no transitions" do
+ state = State.new
+
+ @obj.epsilon_closure([state]).should == [state]
+ end
+
+ it "should return an epsilon closure target" do
+ state1 = State.new
+ state2 = State.new
+
+ state1.add_transition :state => state2, :epsilon => true
+
+ @obj.epsilon_closure([state1]).should include(state2)
+ end
+
+ it "should not return a target with a state1 => state2 on a regular transition symbol" do
+ state1 = State.new
+ state2 = State.new
+
+ state1.add_transition :symbol => :sym, :state => state2
+
+ @obj.epsilon_closure([state1]).should_not include(state2)
+ end
+
+ it "should follow epsilon targets from several states" do
+ state1 = State.new
+ state2 = State.new
+ state1.add_transition :state => state2, :epsilon => true
+
+ state3 = State.new
+ state4 = State.new
+ state3.add_transition :state => state4, :epsilon => true
+
+ @obj.epsilon_closure([state1, state3]).should include(state2)
+ @obj.epsilon_closure([state1, state3]).should include(state4)
+ end
+
+ it "should find multiple levels of epsilon transitions" do
+ state1 = State.new
+ state2 = State.new
+ state3 = State.new
+
+ state1.add_transition :state => state2, :epsilon => true
+ state2.add_transition :state => state3, :epsilon => true
+
+ @obj.epsilon_closure([state1]).should include(state2)
+ @obj.epsilon_closure([state1]).should include(state3)
+ end
+ end
+ end
+ end
+end
|
smtlaissezfaire/hopcroft
|
54ffc4b74e9c6eda816d02aec7824330a624d8e3
|
Add a TODO
|
diff --git a/TODO b/TODO
index 09dbda4..4c7025c 100644
--- a/TODO
+++ b/TODO
@@ -1,6 +1,7 @@
- More integration specs
- NFA -> DFA minimization using the subset construction algorithm (or another, if there is one).
- Integration tests should be matching against both the DFA + NFA matching techniques
- Google Protocol buffer output for DFA state table. Conversion of the buffer to a regex.
- Unicode support
-- Empty regexs (//) should be matched by an empty string
\ No newline at end of file
+- Empty regexs (//) should be matched by an empty string
+- Get rid of NFA transition table in favor of DFA transition table
\ No newline at end of file
|
smtlaissezfaire/hopcroft
|
27ca0e873d335d6b50beb90723d2fae242dd0218
|
Add state ids
|
diff --git a/lib/hopcroft/machine/state.rb b/lib/hopcroft/machine/state.rb
index 72d4e87..cbe21f9 100644
--- a/lib/hopcroft/machine/state.rb
+++ b/lib/hopcroft/machine/state.rb
@@ -1,135 +1,137 @@
module Hopcroft
module Machine
class State
class << self
def reset_counter!
@counter = 1
end
def next_counter
returning counter do |c|
@counter += 1
end
end
def counter
@counter ||= 1
end
end
def initialize(options={})
@start_state = options[:start_state] if options.has_key?(:start_state)
@final_state = options[:final] if options.has_key?(:final)
+ @id = self.class.next_counter
assign_name(options)
end
- attr_reader :name
- alias_method :to_s, :name
+ attr_reader :name
+ attr_reader :id
+ alias_method :to_s, :name
def inspect
"#{name} {start: #{start_state?}, final: #{final_state?}, transitions: #{transitions.size}}"
end
def transitions
@transitions ||= []
end
attr_writer :transitions
# Accepts the following hash arguments:
#
# :machine => m (optional). Links current state to start state of machine
# given with an epsilon transition.
# :start_state => true | false. Make the state a start state. Defaults to false
# :final => true | false. Make the state a final state. Defaults to false
# :state => a_state (if none passed, a new one is constructed)
# :symbol => Symbol to transition to.
# :epsilon => An Epsilon Transition instead of a regular symbol transition
# :any => An any symbol transition. Equivalent to a regex '.'
#
def add_transition(args={})
args[:start_state] = false unless args.has_key?(:start_state)
if args[:machine]
machine = args[:machine]
args[:state] = machine.start_state
args[:state].start_state = false
args[:epsilon] = true
else
args[:state] ||= State.new(args)
end
returning args[:state] do |state|
transitions << transition_for(args, state)
yield(state) if block_given?
state
end
end
def transition_for(args, state)
if args[:epsilon]
EpsilonTransition.new(state)
elsif args[:any]
AnyCharTransition.new(state)
else
Transition.new(args[:symbol], state)
end
end
def start_state?
@start_state.equal?(false) ? false : true
end
attr_writer :start_state
def final_state?
@final_state ? true : false
end
alias_method :final?, :final_state?
attr_writer :final_state
def substates(excluded_states = [])
returning [] do |list|
follow_states.each do |state|
unless excluded_states.include?(state)
excluded_states << state
list.push state
list.push *state.substates(excluded_states)
end
end
end
end
def follow_states(excluded_states = [])
transitions.map { |t| t.state }.reject { |s| excluded_states.include?(s) }
end
def add_transitions_to_table(table)
transitions.each do |transition|
to = transition.to
unless table.has_state_change?(self, to, transition.symbol)
table.add_state_change(self, to, transition.symbol)
transition.to.add_transitions_to_table(table)
end
end
end
def deep_clone
returning clone do |c|
c.transitions = transitions.map { |t| t.deep_clone }
end
end
private
def assign_name(options)
- @name = options[:name] ? options[:name] : "State #{self.class.next_counter}"
+ @name = options[:name] ? options[:name] : "State #{@id}"
end
end
end
end
diff --git a/spec/hopcoft/machine/state_spec.rb b/spec/hopcoft/machine/state_spec.rb
index cceaf22..743d334 100644
--- a/spec/hopcoft/machine/state_spec.rb
+++ b/spec/hopcoft/machine/state_spec.rb
@@ -1,302 +1,321 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Machine
describe State do
it "should set the start state on the first state to a start state" do
state = State.new
state.should be_a_start_state
end
it "should have no transitions to begin with" do
s = State.new
s.transitions.should == []
end
it "should be able to add transitions" do
s = State.new
s.add_transition :symbol => :foo
s.transitions.size.should == 1
end
it "should be a start state" do
s = State.new
s.should be_a_start_state
end
it "should have start state assigned" do
s = State.new
s.start_state = false
s.should_not be_a_start_state
end
it "should not be a final state by default" do
s = State.new
s.should_not be_a_final_state
s.should_not be_final
end
it "should have the final state as assignable" do
s = State.new
s.final_state = true
s.should be_a_final_state
s.should be_final
end
describe "transitions" do
before do
@state = State.new
end
it "should create a transition when calling add_transition" do
@state.add_transition :symbol => :foo
@state.transitions.first.should be_a_kind_of(Transition)
end
it "should pass on the symbol to the transition" do
@state.add_transition :symbol => :baz
transition = @state.transitions.first
transition.symbol.should == :baz
end
it "should construct a new state when none provided" do
@state.add_transition :symbol => :foo
transition = @state.transitions.first
transition.state.should be_a_kind_of(State)
end
it "should not have the new state as the start state" do
@state.add_transition :symbol => :foo
transition = @state.transitions.first
transition.state.should_not be_a_start_state
end
it "should be able to mark the new state as a final state" do
@state.add_transition :symbol => :foo, :final => true
transition = @state.transitions.first
transition.state.should be_a_final_state
end
it "should take another state as the transition target" do
state = mock('state', :null_object => true)
@state.add_transition :symbol => :foo, :state => state
transition = @state.transitions.first
transition.state.should == state
end
it "should be able to add transitions recursively" do
s1 = State.new
s2 = State.new
s1.add_transition :state => s2, :epsilon => true
s2.add_transition :state => s1, :epsilon => true
table = NfaTransitionTable.new
s1.add_transitions_to_table(table)
end
describe "passed :machine => m" do
before do
@state = State.new
@machine = StateMachine.new
end
it "should add a transition to another state machines first state" do
other_machine_start_state = @machine.start_state
@state.add_transition :machine => @machine
@state.transitions.first.state.should == other_machine_start_state
end
it "should add the transition as an epsilon transition" do
@state.add_transition :machine => @machine
@state.transitions.first.should be_a_kind_of(EpsilonTransition)
end
it "should no longer have the other machines start state as a start state in this machine" do
other_machine_start_state = @machine.start_state
@state.add_transition :machine => @machine
@state.transitions.first.state.should_not be_a_start_state
end
end
end
describe "name" do
it "should take a name param" do
state = State.new(:name => "foo")
state.name.should == "foo"
end
it "should auto-assign a state #" do
State.reset_counter!
state = State.new
state.name.should == "State 1"
end
it "should assign 'State 2' for the second state created" do
State.reset_counter!
State.new
state2 = State.new
state2.name.should == "State 2"
end
end
describe "to_s" do
it "should be aliased to the name" do
s = State.new
s.method(:name).should == s.method(:to_s)
end
end
describe "inspect" do
it "should display the name" do
s = State.new(:name => "State 1")
s.inspect.should include("State 1")
end
it "should show start state, final state, etc." do
s = State.new(:name => "State 1", :start_state => true, :final => true)
s.inspect.should == "State 1 {start: true, final: true, transitions: 0}"
end
it "should display the correct value for the start state" do
s = State.new(:name => "State 1", :start_state => false, :final => true)
s.inspect.should == "State 1 {start: false, final: true, transitions: 0}"
end
it "should display the correct value for the final state" do
s = State.new(:name => "State 1", :start_state => true, :final => false)
s.inspect.should == "State 1 {start: true, final: false, transitions: 0}"
end
it "should display 1 transition" do
s = State.new(:name => "State 1", :start_state => true, :final => true)
s.add_transition
s.inspect.should == "State 1 {start: true, final: true, transitions: 1}"
end
end
describe "deep_clone" do
before do
@state = State.new
end
it "should be of class State" do
clone = @state.deep_clone
clone.should be_a_kind_of(State)
end
it "should be a new instance" do
clone = @state.deep_clone
clone.should_not equal(@state)
end
it "should be a final state if the original was a final state" do
@state.final_state = true
clone = @state.deep_clone
clone.should be_a_final_state
end
it "should not have the same transition objects" do
@state.add_transition
transition = @state.transitions.first
clone = @state.deep_clone
clone.transitions.first.should_not equal(transition)
end
it "should have one transition if the original had one transition" do
@state.add_transition
clone = @state.deep_clone
clone.transitions.size.should == 1
end
it "should have two transitions if the original had two transition" do
@state.add_transition
@state.add_transition
clone = @state.deep_clone
clone.transitions.size.should == 2
end
it "should have a transition as a Transition object" do
@state.add_transition
clone = @state.deep_clone
clone.transitions.first.should be_a_kind_of(Transition)
end
it "should call deep_clone on the transitions" do
@state.add_transition
@state.transitions.first.should_receive(:deep_clone)
@state.deep_clone
end
end
describe "substates" do
before do
@state = State.new
end
it "should have none with no transitions" do
@state.substates.should == []
end
it "should have a state which is linked to by a transition" do
new_state = @state.add_transition :symbol => :foo
@state.substates.should == [new_state]
end
it "should have multiple states" do
one = @state.add_transition :symbol => :foo
two = @state.add_transition :symbol => :foo
@state.substates.should == [one, two]
end
it "should show states of the states (should find the states substates recursively)" do
substate = @state.add_transition :symbol => :foo
sub_substate = substate.add_transition :symbol => :foo
@state.substates.should == [substate, sub_substate]
end
it "should work with recursive transitions" do
@state.add_transition :state => @state
@state.substates.should == [@state]
end
it "should not find duplicate states" do
state2 = @state.add_transition
state3 = state2.add_transition
state4 = state3.add_transition
state5 = state2.add_transition
state4.add_transition :state => state5
@state.substates.should == [state2, state3, state4, state5]
end
it "should deal with infinite recursion on more than one level" do
state2 = @state.add_transition
state3 = state2.add_transition
state3.add_transition :state => @state
@state.substates.should == [state2, state3, @state]
end
end
+
+ describe "id" do
+ it "should have an id as an integer" do
+ State.new.id.should be_a_kind_of(Fixnum)
+ end
+
+ it "should be auto-incrementing" do
+ s1 = State.new
+ s2 = State.new
+
+ s2.id.should equal(s1.id + 1)
+ end
+
+ it "should be 1 after reset_counter! is called" do
+ State.reset_counter!
+ s1 = State.new
+ s1.id.should equal(1)
+ end
+ end
end
end
end
|
smtlaissezfaire/hopcroft
|
e58cf3c7b070bf836cf723f3385584a514c6ab4a
|
Enough spec'ing for rcov
|
diff --git a/lib/hopcroft/machine/transition_table.rb b/lib/hopcroft/machine/transition_table.rb
index 86437df..4e2b709 100644
--- a/lib/hopcroft/machine/transition_table.rb
+++ b/lib/hopcroft/machine/transition_table.rb
@@ -1,40 +1,39 @@
module Hopcroft
module Machine
class TransitionTable < Hash
class MissingStartState < StandardError; end
attr_accessor :start_state
- # def add_state_change(from, to, sym)
- # self[from] ||= {}
- # self[from][sym] = to
- # end
- #
+ def add_state_change(from, to, sym)
+ raise NotImplementedError
+ end
+
def has_state_change?(from, to, sym)
self[from] && self[from][sym]
end
def to_hash
Hash.new(self)
end
def inspect
TableDisplayer.new(self).to_s
end
- # def matches?(*args)
- # raise NotImplementedError
- # end
- #
+ def matches?(input_array)
+ raise NotImplementedError
+ end
+
def matched_by?(*args)
matches?(*args)
end
private
def raise_if_no_start_state
raise MissingStartState unless start_state
end
end
end
end
\ No newline at end of file
diff --git a/spec/hopcoft/machine/transition_table_spec.rb b/spec/hopcoft/machine/transition_table_spec.rb
new file mode 100644
index 0000000..89c515c
--- /dev/null
+++ b/spec/hopcoft/machine/transition_table_spec.rb
@@ -0,0 +1,23 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Machine
+ describe TransitionTable do
+ before do
+ @table = TransitionTable.new
+ end
+
+ it "should raise a NotImplementedError when calling add_state_change" do
+ lambda {
+ @table.add_state_change :from, :to, :sym
+ }.should raise_error(NotImplementedError)
+ end
+
+ it "should raise a NotImplementedError when calling matches?" do
+ lambda {
+ @table.matches?([])
+ }.should raise_error(NotImplementedError)
+ end
+ end
+ end
+end
\ No newline at end of file
|
smtlaissezfaire/hopcroft
|
dc297790eb1c8ff8ccc7be70002a74a951891c09
|
Refactor
|
diff --git a/lib/hopcroft/machine/dfa_transition_table.rb b/lib/hopcroft/machine/dfa_transition_table.rb
index 54431d2..3a88da0 100644
--- a/lib/hopcroft/machine/dfa_transition_table.rb
+++ b/lib/hopcroft/machine/dfa_transition_table.rb
@@ -1,42 +1,42 @@
module Hopcroft
module Machine
class DfaTransitionTable < TransitionTable
class DuplicateStateError < StandardError; end
def add_state_change(from, to, sym)
self[from] ||= {}
raise DuplicateStateError if self[from][sym]
self[from][sym] = to
end
def has_state_change?(from, to, sym)
- self[from] && self[from][sym] && self[from][sym] == to ? true : false
+ super && self[from][sym] == to ? true : false
end
def target_for(state, sym)
self[state] && self[state][sym] ? self[state][sym] : nil
end
def matches?(input_array, target = start_state)
raise_if_no_start_state
input_array.each do |char|
target = target_for(target, char)
return false unless target
end
final_state? target
end
alias_method :initial_state, :start_state
alias_method :initial_states, :start_state
alias_method :next_transitions, :target_for
private
def final_state?(target)
target.final?
end
end
end
end
diff --git a/lib/hopcroft/machine/nfa_transition_table.rb b/lib/hopcroft/machine/nfa_transition_table.rb
index 3238642..c731d35 100644
--- a/lib/hopcroft/machine/nfa_transition_table.rb
+++ b/lib/hopcroft/machine/nfa_transition_table.rb
@@ -1,86 +1,84 @@
module Hopcroft
module Machine
class NfaTransitionTable < TransitionTable
def start_state=(start_state)
self[start_state] ||= {}
super
end
# Create a transition without marking appropriate start states
def add_state_change(from_state, to_state, transition_symbol)
sym = transition_symbol
self[from_state] ||= {}
self[from_state][sym] ||= []
self[from_state][sym] << to_state
end
def has_state_change?(from_state, to_state, transition_symbol)
- self[from_state] &&
- self[from_state][transition_symbol] &&
- self[from_state][transition_symbol].include?(to_state)
+ super && self[from_state][transition_symbol].include?(to_state)
end
def targets_for(state, transition_sym)
find_targets_matching(state, transition_sym) do |target|
epsilon_states_following(target)
end
end
def initial_states
[start_state] + epsilon_states_following(start_state)
end
def next_transitions(states, sym)
states.map { |s| targets_for(s, sym) }.compact.flatten
end
def matches?(input_array, current_states = initial_states)
raise_if_no_start_state
input_array.each do |sym|
current_states = next_transitions(current_states, sym.to_sym)
end
current_states.any? { |state| state.final? }
end
private
def epsilon_states_following(state)
find_targets_matching(state, EpsilonTransition) do |target|
epsilon_states_following(target)
end
end
def find_targets_matching(state, transition_sym, &recursion_block)
returning Array.new do |a|
direct_targets = find_targets_for(state, transition_sym)
append a, direct_targets
direct_targets.each do |target|
append a, recursion_block.call(target)
end
end
end
def find_targets_for(state, transition_sym)
returning Array.new do |a|
if state = self[state]
if state[transition_sym]
append a, state[transition_sym]
end
if state[AnyCharTransition] && transition_sym != EpsilonTransition
append a, state[AnyCharTransition]
end
end
end
end
def append(array1, array2)
array1.push *array2
end
end
end
end
diff --git a/lib/hopcroft/machine/transition_table.rb b/lib/hopcroft/machine/transition_table.rb
index 0a96ae7..86437df 100644
--- a/lib/hopcroft/machine/transition_table.rb
+++ b/lib/hopcroft/machine/transition_table.rb
@@ -1,40 +1,40 @@
module Hopcroft
module Machine
class TransitionTable < Hash
class MissingStartState < StandardError; end
attr_accessor :start_state
# def add_state_change(from, to, sym)
# self[from] ||= {}
# self[from][sym] = to
# end
#
- # def has_state_change?(from, to, sym)
- # self[from] && self[from][sym]
- # end
+ def has_state_change?(from, to, sym)
+ self[from] && self[from][sym]
+ end
def to_hash
Hash.new(self)
end
def inspect
TableDisplayer.new(self).to_s
end
# def matches?(*args)
# raise NotImplementedError
# end
#
def matched_by?(*args)
matches?(*args)
end
private
def raise_if_no_start_state
raise MissingStartState unless start_state
end
end
end
end
\ No newline at end of file
|
smtlaissezfaire/hopcroft
|
98e16913cdd66e72b44d0063012c11d41f9b6e5d
|
Comment out code which isn't covered as reported by rcov
|
diff --git a/lib/hopcroft/machine/transition_table.rb b/lib/hopcroft/machine/transition_table.rb
index 7695b40..0a96ae7 100644
--- a/lib/hopcroft/machine/transition_table.rb
+++ b/lib/hopcroft/machine/transition_table.rb
@@ -1,40 +1,40 @@
module Hopcroft
module Machine
class TransitionTable < Hash
class MissingStartState < StandardError; end
attr_accessor :start_state
- def add_state_change(from, to, sym)
- self[from] ||= {}
- self[from][sym] = to
- end
-
- def has_state_change?(from, to, sym)
- self[from] && self[from][sym]
- end
+ # def add_state_change(from, to, sym)
+ # self[from] ||= {}
+ # self[from][sym] = to
+ # end
+ #
+ # def has_state_change?(from, to, sym)
+ # self[from] && self[from][sym]
+ # end
def to_hash
Hash.new(self)
end
def inspect
TableDisplayer.new(self).to_s
end
- def matches?(*args)
- raise NotImplementedError
- end
-
+ # def matches?(*args)
+ # raise NotImplementedError
+ # end
+ #
def matched_by?(*args)
matches?(*args)
end
private
def raise_if_no_start_state
raise MissingStartState unless start_state
end
end
end
end
\ No newline at end of file
|
smtlaissezfaire/hopcroft
|
10aeefcedb34ec07760bb4f14aeca5683e8e9635
|
Fix matching in DFAs.
|
diff --git a/lib/hopcroft/machine/dfa_transition_table.rb b/lib/hopcroft/machine/dfa_transition_table.rb
index 1f1b03c..54431d2 100644
--- a/lib/hopcroft/machine/dfa_transition_table.rb
+++ b/lib/hopcroft/machine/dfa_transition_table.rb
@@ -1,25 +1,42 @@
module Hopcroft
module Machine
class DfaTransitionTable < TransitionTable
class DuplicateStateError < StandardError; end
def add_state_change(from, to, sym)
self[from] ||= {}
raise DuplicateStateError if self[from][sym]
self[from][sym] = to
end
def has_state_change?(from, to, sym)
self[from] && self[from][sym] && self[from][sym] == to ? true : false
end
def target_for(state, sym)
self[state] && self[state][sym] ? self[state][sym] : nil
end
+ def matches?(input_array, target = start_state)
+ raise_if_no_start_state
+
+ input_array.each do |char|
+ target = target_for(target, char)
+ return false unless target
+ end
+
+ final_state? target
+ end
+
alias_method :initial_state, :start_state
alias_method :initial_states, :start_state
alias_method :next_transitions, :target_for
+
+ private
+
+ def final_state?(target)
+ target.final?
+ end
end
end
end
diff --git a/lib/hopcroft/machine/nfa_transition_table.rb b/lib/hopcroft/machine/nfa_transition_table.rb
index 4cfa29e..3238642 100644
--- a/lib/hopcroft/machine/nfa_transition_table.rb
+++ b/lib/hopcroft/machine/nfa_transition_table.rb
@@ -1,76 +1,86 @@
module Hopcroft
module Machine
class NfaTransitionTable < TransitionTable
def start_state=(start_state)
self[start_state] ||= {}
super
end
# Create a transition without marking appropriate start states
def add_state_change(from_state, to_state, transition_symbol)
sym = transition_symbol
self[from_state] ||= {}
self[from_state][sym] ||= []
self[from_state][sym] << to_state
end
def has_state_change?(from_state, to_state, transition_symbol)
self[from_state] &&
self[from_state][transition_symbol] &&
self[from_state][transition_symbol].include?(to_state)
end
def targets_for(state, transition_sym)
find_targets_matching(state, transition_sym) do |target|
epsilon_states_following(target)
end
end
def initial_states
[start_state] + epsilon_states_following(start_state)
end
def next_transitions(states, sym)
states.map { |s| targets_for(s, sym) }.compact.flatten
end
+
+ def matches?(input_array, current_states = initial_states)
+ raise_if_no_start_state
+
+ input_array.each do |sym|
+ current_states = next_transitions(current_states, sym.to_sym)
+ end
+ current_states.any? { |state| state.final? }
+ end
+
private
def epsilon_states_following(state)
find_targets_matching(state, EpsilonTransition) do |target|
epsilon_states_following(target)
end
end
def find_targets_matching(state, transition_sym, &recursion_block)
returning Array.new do |a|
direct_targets = find_targets_for(state, transition_sym)
append a, direct_targets
direct_targets.each do |target|
append a, recursion_block.call(target)
end
end
end
def find_targets_for(state, transition_sym)
returning Array.new do |a|
if state = self[state]
if state[transition_sym]
append a, state[transition_sym]
end
if state[AnyCharTransition] && transition_sym != EpsilonTransition
append a, state[AnyCharTransition]
end
end
end
end
def append(array1, array2)
array1.push *array2
end
end
end
end
diff --git a/lib/hopcroft/machine/transition_table.rb b/lib/hopcroft/machine/transition_table.rb
index dfff988..7695b40 100644
--- a/lib/hopcroft/machine/transition_table.rb
+++ b/lib/hopcroft/machine/transition_table.rb
@@ -1,36 +1,40 @@
module Hopcroft
module Machine
class TransitionTable < Hash
class MissingStartState < StandardError; end
attr_accessor :start_state
def add_state_change(from, to, sym)
self[from] ||= {}
self[from][sym] = to
end
def has_state_change?(from, to, sym)
self[from] && self[from][sym]
end
- def matches?(input_array, current_states = initial_states)
- raise MissingStartState unless start_state
-
- input_array.each do |sym|
- current_states = next_transitions(current_states, sym.to_sym)
- end
-
- current_states.any? { |state| state.final? }
- end
-
def to_hash
Hash.new(self)
end
def inspect
TableDisplayer.new(self).to_s
end
+
+ def matches?(*args)
+ raise NotImplementedError
+ end
+
+ def matched_by?(*args)
+ matches?(*args)
+ end
+
+ private
+
+ def raise_if_no_start_state
+ raise MissingStartState unless start_state
+ end
end
end
end
\ No newline at end of file
diff --git a/spec/hopcoft/machine/dfa_transition_table_spec.rb b/spec/hopcoft/machine/dfa_transition_table_spec.rb
index 4771332..6063673 100644
--- a/spec/hopcoft/machine/dfa_transition_table_spec.rb
+++ b/spec/hopcoft/machine/dfa_transition_table_spec.rb
@@ -1,115 +1,175 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Machine
describe DfaTransitionTable do
before do
@table = DfaTransitionTable.new
@state = State.new
end
it "should have the start state as assignable" do
@table.start_state = @state
@table.start_state.should equal(@state)
end
describe "adding state changes" do
before do
@state_two = State.new
end
it "should be able to add a state change with a symbol" do
@table.add_state_change(@state, @state_two, :symbol)
@table.has_state_change?(@state, @state_two, :symbol).should be_true
end
it "should not have a state change if none are provided" do
@table.has_state_change?(@state, @state_two, :symbol).should be_false
end
it "should not match the state change if with a different sym" do
@table.add_state_change(@state, @state_two, :symbol)
@table.has_state_change?(@state, @state_two, :bar).should be_false
end
it "should not match the state change with a different starting state" do
@table.add_state_change(@state, @state_two, :symbol)
@table.has_state_change?(mock('different state'), @state_two, :symbol).should be_false
end
it "should not match the state change with a different finishing state" do
@table.add_state_change(@state, @state_two, :symbol)
@table.has_state_change?(@state, mock('a different state'), :symbol).should be_false
end
it "should raise an error if a state change for the state & symbol has already been provided" do
@table.add_state_change(@state, @state_two, :symbol)
lambda {
@table.add_state_change(@state, mock("another target"), :symbol)
}.should raise_error(DfaTransitionTable::DuplicateStateError)
end
end
describe "target_for" do
before do
@state_two = State.new
end
it "should be the to symbol of the state change" do
@table.add_state_change(@state, @state_two, :symbol)
@table.target_for(@state, :symbol).should == @state_two
end
it "should return nil if it cannot find the state" do
@table.add_state_change(@state, @state_two, :symbol)
@table.target_for(mock("a different state"), :symbol).should be_nil
end
it "should return nil if it cannot find the symbol" do
@table.add_state_change(@state, @state_two, :symbol)
@table.target_for(@state, :foo).should be_nil
end
end
describe "to_hash" do
it "should return a hash" do
@table.to_hash.should be_a_kind_of(Hash)
end
it "should return a hash constructed from the table" do
Hash.should_receive(:new).with(@table)
@table.to_hash
end
end
describe "initial_states" do
it "should be the start state" do
@table.start_state = @state
@table.initial_state.should equal(@state)
end
end
describe "next_transitions" do
it "should be an alias for target_for" do
@table.method(:next_transitions).should == @table.method(:target_for)
end
end
describe "matches?" do
it "should raise an error if there is no start state" do
lambda {
@table.matches?("foo")
}.should raise_error(DfaTransitionTable::MissingStartState)
end
+
+ describe "with a start state which is a final state, with no transitions" do
+ before do
+ @state = State.new(:final => true)
+ @table.start_state = @state
+ end
+
+ it "should match the start state with no input chars" do
+ @table.should be_matched_by([])
+ end
+
+ it "should not match when given an input symbol" do
+ @table.should_not be_matched_by(["a"])
+ end
+ end
+
+ describe "with only a start state & no final states" do
+ before do
+ @state = State.new(:final => false)
+ @table.start_state = @state
+ end
+
+ it "should not match with no input" do
+ @table.should_not be_matched_by([])
+ end
+
+ it "should not match when given an input symbol" do
+ @table.should_not be_matched_by(["a"])
+ end
+ end
+
+ describe "with a start state which leads to a final state" do
+ before do
+ @state = State.new
+ @final_state = State.new(:final => true)
+
+ @table.start_state = @state
+ @table.add_state_change @state, @final_state, "a"
+ end
+
+ it "should not match when given no input" do
+ @table.should_not be_matched_by([])
+ end
+
+ it "should match when given the one char" do
+ @table.should be_matched_by(["a"])
+ end
+
+ it "should not match when given a different char" do
+ @table.should_not be_matched_by(["b"])
+ end
+
+ it "should not match when given the input symbol repeatedly" do
+ @table.should_not be_matched_by(["a", "a"])
+ end
+
+ it "should return false if it does not match" do
+ @table.matched_by?(["a", "a"]).should be_false
+ end
+ end
end
describe "inspect" do
it "should call TableDisplayer" do
TableDisplayer.should_receive(:new)
@table.inspect
end
end
end
end
end
\ No newline at end of file
|
smtlaissezfaire/hopcroft
|
4a0c326b21e54d1270225ad43ce6bb70e1a159d9
|
Add TODO list
|
diff --git a/TODO b/TODO
new file mode 100644
index 0000000..09dbda4
--- /dev/null
+++ b/TODO
@@ -0,0 +1,6 @@
+- More integration specs
+- NFA -> DFA minimization using the subset construction algorithm (or another, if there is one).
+ - Integration tests should be matching against both the DFA + NFA matching techniques
+- Google Protocol buffer output for DFA state table. Conversion of the buffer to a regex.
+- Unicode support
+- Empty regexs (//) should be matched by an empty string
\ No newline at end of file
|
smtlaissezfaire/hopcroft
|
9895f9fbe511ea3d678fe4df94a6363f0389ac30
|
Fill in integration specs
|
diff --git a/spec/hopcoft/integration_spec.rb b/spec/hopcoft/integration_spec.rb
index dcc6343..b2d1b60 100644
--- a/spec/hopcoft/integration_spec.rb
+++ b/spec/hopcoft/integration_spec.rb
@@ -1,167 +1,173 @@
require File.expand_path(File.dirname(__FILE__) + "/../spec_helper")
module Hopcroft
describe "Integration tests" do
describe "the regex /a/" do
before do
@regex = Regex.compile("a")
end
it "should match 'a'" do
@regex.should be_matched_by("a")
end
it "should not match 'b'" do
@regex.should_not be_matched_by("b")
end
it "should not match 'abasdfasdf'" do
@regex.should_not be_matched_by('abasdfasdf')
end
end
describe "the regex /ab/" do
before do
@regex = Regex.compile("ab")
end
it "should match 'ab'" do
@regex.should be_matched_by("ab")
end
it "should not match 'x'" do
@regex.should_not be_matched_by("x")
end
it "should not match 'ba'" do
@regex.should_not be_matched_by("ba")
end
end
describe "the regex /a*/" do
before do
@regex = Regex.compile("a*")
end
it "should be matched by 'a'" do
@regex.should be_matched_by("a")
end
it "should be matched by the empty string" do
@regex.should be_matched_by("")
end
it "should be matched by 'aa'" do
@regex.should be_matched_by("aa")
end
it "should be matched by 'aaa'" do
@regex.should be_matched_by("aaa")
end
it "should not be matched by 'aab'" do
@regex.should_not be_matched_by("aab")
end
end
describe "the regex /a+/" do
before do
@regex = Regex.compile("a+")
end
it "should be matched by 'a'" do
@regex.should be_matched_by("a")
end
it "should NOT be matched by the empty string" do
@regex.should_not be_matched_by("")
end
it "should be matched by 'aa'" do
@regex.should be_matched_by("aa")
end
it "should not be matched by 'aab'" do
@regex.should_not be_matched_by("aab")
end
it "should be matched by 'aaa'" do
@regex.matches?("aaa")
@regex.should be_matched_by("aaa")
end
end
describe "the regex /a|b/" do
before do
@regex = Regex.compile("a|b")
end
it "should be matched by an 'a'" do
@regex.should be_matched_by("a")
end
it "should be matched by a 'b'" do
@regex.should be_matched_by("b")
end
it "should not be matched by a 'c'" do
@regex.should_not be_matched_by("c")
end
it "should not be matched with the string 'ab'" do
@regex.matched_by?("ab")
@regex.should_not be_matched_by("ab")
end
end
describe "the regex /(a|b)+/" do
before do
@regex = Regex.compile("(a|b)+")
end
it "should not match the empty string" do
@regex.should_not be_matched_by("")
end
it "should match an a" do
@regex.should be_matched_by("a")
end
it "should match 'b'" do
@regex.should be_matched_by("b")
end
it "should match 'aaa'" do
@regex.should be_matched_by("aaa")
end
it "should match 'bbb'" do
@regex.should be_matched_by("bbb")
end
it "should match 'ababababbbaaa'" do
@regex.should be_matched_by('ababababbbaaa')
end
it "should not be matched if it contains a different char" do
@regex.should_not be_matched_by("ababbbbaacaab")
end
end
describe "the regex (a|b)+x" do
before do
@regex = Regex.compile("(a|b)+x")
end
it "should match 'ax'" do
@regex.should be_matched_by("ax")
end
- it "should match 'bx'"
+ it "should match 'bx'" do
+ @regex.should be_matched_by("bx")
+ end
- it "should match 'ababx'"
+ it "should match 'ababx'" do
+ @regex.should be_matched_by("ababx")
+ end
- it "should not match 'x'"
+ it "should not match 'x'" do
+ @regex.should_not be_matched_by("x")
+ end
end
end
end
|
smtlaissezfaire/hopcroft
|
7a40d7944ba1b36d9e3d4a3b5855a3068f0a7463
|
Don't infinitely recur with loops in #substates
|
diff --git a/lib/hopcroft/machine/state.rb b/lib/hopcroft/machine/state.rb
index 4184a43..72d4e87 100644
--- a/lib/hopcroft/machine/state.rb
+++ b/lib/hopcroft/machine/state.rb
@@ -1,122 +1,135 @@
module Hopcroft
module Machine
class State
class << self
def reset_counter!
@counter = 1
end
def next_counter
returning counter do |c|
@counter += 1
end
end
def counter
@counter ||= 1
end
end
def initialize(options={})
@start_state = options[:start_state] if options.has_key?(:start_state)
@final_state = options[:final] if options.has_key?(:final)
assign_name(options)
end
attr_reader :name
alias_method :to_s, :name
def inspect
"#{name} {start: #{start_state?}, final: #{final_state?}, transitions: #{transitions.size}}"
end
def transitions
@transitions ||= []
end
attr_writer :transitions
# Accepts the following hash arguments:
#
# :machine => m (optional). Links current state to start state of machine
# given with an epsilon transition.
# :start_state => true | false. Make the state a start state. Defaults to false
# :final => true | false. Make the state a final state. Defaults to false
# :state => a_state (if none passed, a new one is constructed)
# :symbol => Symbol to transition to.
# :epsilon => An Epsilon Transition instead of a regular symbol transition
# :any => An any symbol transition. Equivalent to a regex '.'
#
def add_transition(args={})
args[:start_state] = false unless args.has_key?(:start_state)
if args[:machine]
machine = args[:machine]
args[:state] = machine.start_state
args[:state].start_state = false
args[:epsilon] = true
else
args[:state] ||= State.new(args)
end
returning args[:state] do |state|
transitions << transition_for(args, state)
yield(state) if block_given?
state
end
end
def transition_for(args, state)
if args[:epsilon]
EpsilonTransition.new(state)
elsif args[:any]
AnyCharTransition.new(state)
else
Transition.new(args[:symbol], state)
end
end
def start_state?
@start_state.equal?(false) ? false : true
end
attr_writer :start_state
def final_state?
@final_state ? true : false
end
alias_method :final?, :final_state?
attr_writer :final_state
-
- def substates
- transitions.map { |t| [t.state, t.state.substates] }.flatten
+
+ def substates(excluded_states = [])
+ returning [] do |list|
+ follow_states.each do |state|
+ unless excluded_states.include?(state)
+ excluded_states << state
+
+ list.push state
+ list.push *state.substates(excluded_states)
+ end
+ end
+ end
+ end
+
+ def follow_states(excluded_states = [])
+ transitions.map { |t| t.state }.reject { |s| excluded_states.include?(s) }
end
def add_transitions_to_table(table)
transitions.each do |transition|
to = transition.to
unless table.has_state_change?(self, to, transition.symbol)
table.add_state_change(self, to, transition.symbol)
transition.to.add_transitions_to_table(table)
end
end
end
def deep_clone
returning clone do |c|
c.transitions = transitions.map { |t| t.deep_clone }
end
end
private
def assign_name(options)
@name = options[:name] ? options[:name] : "State #{self.class.next_counter}"
end
end
end
end
diff --git a/spec/hopcoft/integration_spec.rb b/spec/hopcoft/integration_spec.rb
index 5f0389e..dcc6343 100644
--- a/spec/hopcoft/integration_spec.rb
+++ b/spec/hopcoft/integration_spec.rb
@@ -1,165 +1,167 @@
require File.expand_path(File.dirname(__FILE__) + "/../spec_helper")
module Hopcroft
describe "Integration tests" do
describe "the regex /a/" do
before do
@regex = Regex.compile("a")
end
it "should match 'a'" do
@regex.should be_matched_by("a")
end
it "should not match 'b'" do
@regex.should_not be_matched_by("b")
end
it "should not match 'abasdfasdf'" do
@regex.should_not be_matched_by('abasdfasdf')
end
end
describe "the regex /ab/" do
before do
@regex = Regex.compile("ab")
end
it "should match 'ab'" do
@regex.should be_matched_by("ab")
end
it "should not match 'x'" do
@regex.should_not be_matched_by("x")
end
it "should not match 'ba'" do
@regex.should_not be_matched_by("ba")
end
end
describe "the regex /a*/" do
before do
@regex = Regex.compile("a*")
end
it "should be matched by 'a'" do
@regex.should be_matched_by("a")
end
it "should be matched by the empty string" do
@regex.should be_matched_by("")
end
it "should be matched by 'aa'" do
@regex.should be_matched_by("aa")
end
it "should be matched by 'aaa'" do
@regex.should be_matched_by("aaa")
end
it "should not be matched by 'aab'" do
@regex.should_not be_matched_by("aab")
end
end
describe "the regex /a+/" do
before do
@regex = Regex.compile("a+")
end
it "should be matched by 'a'" do
@regex.should be_matched_by("a")
end
it "should NOT be matched by the empty string" do
@regex.should_not be_matched_by("")
end
it "should be matched by 'aa'" do
@regex.should be_matched_by("aa")
end
it "should not be matched by 'aab'" do
@regex.should_not be_matched_by("aab")
end
it "should be matched by 'aaa'" do
@regex.matches?("aaa")
@regex.should be_matched_by("aaa")
end
end
describe "the regex /a|b/" do
before do
@regex = Regex.compile("a|b")
end
it "should be matched by an 'a'" do
@regex.should be_matched_by("a")
end
it "should be matched by a 'b'" do
@regex.should be_matched_by("b")
end
it "should not be matched by a 'c'" do
@regex.should_not be_matched_by("c")
end
it "should not be matched with the string 'ab'" do
@regex.matched_by?("ab")
@regex.should_not be_matched_by("ab")
end
end
describe "the regex /(a|b)+/" do
before do
@regex = Regex.compile("(a|b)+")
end
it "should not match the empty string" do
@regex.should_not be_matched_by("")
end
it "should match an a" do
@regex.should be_matched_by("a")
end
it "should match 'b'" do
@regex.should be_matched_by("b")
end
it "should match 'aaa'" do
@regex.should be_matched_by("aaa")
end
it "should match 'bbb'" do
@regex.should be_matched_by("bbb")
end
it "should match 'ababababbbaaa'" do
@regex.should be_matched_by('ababababbbaaa')
end
it "should not be matched if it contains a different char" do
@regex.should_not be_matched_by("ababbbbaacaab")
end
end
describe "the regex (a|b)+x" do
before do
- # @regex = Regex.compile("(a|b)+x")
+ @regex = Regex.compile("(a|b)+x")
end
- it "should match 'ax'"
+ it "should match 'ax'" do
+ @regex.should be_matched_by("ax")
+ end
it "should match 'bx'"
it "should match 'ababx'"
it "should not match 'x'"
end
end
end
diff --git a/spec/hopcoft/machine/state_spec.rb b/spec/hopcoft/machine/state_spec.rb
index ac9ae81..cceaf22 100644
--- a/spec/hopcoft/machine/state_spec.rb
+++ b/spec/hopcoft/machine/state_spec.rb
@@ -1,277 +1,302 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Machine
describe State do
it "should set the start state on the first state to a start state" do
state = State.new
state.should be_a_start_state
end
it "should have no transitions to begin with" do
s = State.new
s.transitions.should == []
end
it "should be able to add transitions" do
s = State.new
s.add_transition :symbol => :foo
s.transitions.size.should == 1
end
it "should be a start state" do
s = State.new
s.should be_a_start_state
end
it "should have start state assigned" do
s = State.new
s.start_state = false
s.should_not be_a_start_state
end
it "should not be a final state by default" do
s = State.new
s.should_not be_a_final_state
s.should_not be_final
end
it "should have the final state as assignable" do
s = State.new
s.final_state = true
s.should be_a_final_state
s.should be_final
end
describe "transitions" do
before do
@state = State.new
end
it "should create a transition when calling add_transition" do
@state.add_transition :symbol => :foo
@state.transitions.first.should be_a_kind_of(Transition)
end
it "should pass on the symbol to the transition" do
@state.add_transition :symbol => :baz
transition = @state.transitions.first
transition.symbol.should == :baz
end
it "should construct a new state when none provided" do
@state.add_transition :symbol => :foo
transition = @state.transitions.first
transition.state.should be_a_kind_of(State)
end
it "should not have the new state as the start state" do
@state.add_transition :symbol => :foo
transition = @state.transitions.first
transition.state.should_not be_a_start_state
end
it "should be able to mark the new state as a final state" do
@state.add_transition :symbol => :foo, :final => true
transition = @state.transitions.first
transition.state.should be_a_final_state
end
it "should take another state as the transition target" do
state = mock('state', :null_object => true)
@state.add_transition :symbol => :foo, :state => state
transition = @state.transitions.first
transition.state.should == state
end
it "should be able to add transitions recursively" do
s1 = State.new
s2 = State.new
s1.add_transition :state => s2, :epsilon => true
s2.add_transition :state => s1, :epsilon => true
table = NfaTransitionTable.new
s1.add_transitions_to_table(table)
end
describe "passed :machine => m" do
before do
@state = State.new
@machine = StateMachine.new
end
it "should add a transition to another state machines first state" do
other_machine_start_state = @machine.start_state
@state.add_transition :machine => @machine
@state.transitions.first.state.should == other_machine_start_state
end
it "should add the transition as an epsilon transition" do
@state.add_transition :machine => @machine
@state.transitions.first.should be_a_kind_of(EpsilonTransition)
end
it "should no longer have the other machines start state as a start state in this machine" do
other_machine_start_state = @machine.start_state
@state.add_transition :machine => @machine
@state.transitions.first.state.should_not be_a_start_state
end
end
end
describe "name" do
it "should take a name param" do
state = State.new(:name => "foo")
state.name.should == "foo"
end
it "should auto-assign a state #" do
State.reset_counter!
state = State.new
state.name.should == "State 1"
end
it "should assign 'State 2' for the second state created" do
State.reset_counter!
State.new
state2 = State.new
state2.name.should == "State 2"
end
end
describe "to_s" do
it "should be aliased to the name" do
s = State.new
s.method(:name).should == s.method(:to_s)
end
end
describe "inspect" do
it "should display the name" do
s = State.new(:name => "State 1")
s.inspect.should include("State 1")
end
it "should show start state, final state, etc." do
s = State.new(:name => "State 1", :start_state => true, :final => true)
s.inspect.should == "State 1 {start: true, final: true, transitions: 0}"
end
it "should display the correct value for the start state" do
s = State.new(:name => "State 1", :start_state => false, :final => true)
s.inspect.should == "State 1 {start: false, final: true, transitions: 0}"
end
it "should display the correct value for the final state" do
s = State.new(:name => "State 1", :start_state => true, :final => false)
s.inspect.should == "State 1 {start: true, final: false, transitions: 0}"
end
it "should display 1 transition" do
s = State.new(:name => "State 1", :start_state => true, :final => true)
s.add_transition
s.inspect.should == "State 1 {start: true, final: true, transitions: 1}"
end
end
describe "deep_clone" do
before do
@state = State.new
end
it "should be of class State" do
clone = @state.deep_clone
clone.should be_a_kind_of(State)
end
it "should be a new instance" do
clone = @state.deep_clone
clone.should_not equal(@state)
end
it "should be a final state if the original was a final state" do
@state.final_state = true
clone = @state.deep_clone
clone.should be_a_final_state
end
it "should not have the same transition objects" do
@state.add_transition
transition = @state.transitions.first
clone = @state.deep_clone
clone.transitions.first.should_not equal(transition)
end
it "should have one transition if the original had one transition" do
@state.add_transition
clone = @state.deep_clone
clone.transitions.size.should == 1
end
it "should have two transitions if the original had two transition" do
@state.add_transition
@state.add_transition
clone = @state.deep_clone
clone.transitions.size.should == 2
end
it "should have a transition as a Transition object" do
@state.add_transition
clone = @state.deep_clone
clone.transitions.first.should be_a_kind_of(Transition)
end
it "should call deep_clone on the transitions" do
@state.add_transition
@state.transitions.first.should_receive(:deep_clone)
@state.deep_clone
end
end
# Specs for State#substates: every state reachable through transitions,
# collected recursively, without duplicates and without looping forever
# on cyclic transition graphs.
describe "substates" do
  before do
    @state = State.new
  end

  it "should have none with no transitions" do
    @state.substates.should == []
  end

  it "should have a state which is linked to by a transition" do
    child = @state.add_transition :symbol => :foo
    @state.substates.should == [child]
  end

  it "should have multiple states" do
    first_child  = @state.add_transition :symbol => :foo
    second_child = @state.add_transition :symbol => :foo

    @state.substates.should == [first_child, second_child]
  end

  it "should show states of the states (should find the states substates recursively)" do
    child      = @state.add_transition :symbol => :foo
    grandchild = child.add_transition :symbol => :foo

    @state.substates.should == [child, grandchild]
  end

  it "should work with recursive transitions" do
    # self-loop: a transition pointing back at the state itself
    @state.add_transition :state => @state
    @state.substates.should == [@state]
  end

  it "should not find duplicate states" do
    # diamond-shaped graph: state5 is reachable via two paths but
    # must appear only once in the result
    state2 = @state.add_transition
    state3 = state2.add_transition
    state4 = state3.add_transition

    state5 = state2.add_transition
    state4.add_transition :state => state5

    @state.substates.should == [state2, state3, state4, state5]
  end

  it "should deal with infinite recursion on more than one level" do
    # cycle back to the root two levels down
    state2 = @state.add_transition
    state3 = state2.add_transition
    state3.add_transition :state => @state

    @state.substates.should == [state2, state3, @state]
  end
end
end
end
end
|
smtlaissezfaire/hopcroft
|
8b7742d73803d2829e5b0fc3bf73b3ddee7fd460
|
Fix escaping which was intentionally broken when refactoring to grep's grammar
|
diff --git a/lib/hopcroft/regex/regex_parser.treetop b/lib/hopcroft/regex/regex_parser.treetop
index 66bced1..7180511 100644
--- a/lib/hopcroft/regex/regex_parser.treetop
+++ b/lib/hopcroft/regex/regex_parser.treetop
@@ -1,168 +1,168 @@
#
# This grammar is taken from GNU's grep grammar, with slight modifications. It doesn't
# support backreferencing, metachars, negated character classes, repetion
# with {n,m}, etc. - although there is no reason that it couldn't.
#
# In addition, GNU's grep grammar is modified for an LL parser. LL parsers can't
# process left recursion without under going left factorization:
#
# See:
# http://treetop.rubyforge.org/pitfalls_and_advanced_techniques.html
# http://en.wikipedia.org/wiki/Left_recursion#Removing_immediate_left_recursion
#
#
# From GNU grep:
# The grammar understood by the parser is as follows.
#
# regexp:
# regexp OR branch
# branch
#
# branch:
# branch closure
# closure
#
# closure:
# closure QMARK
# closure STAR
# closure PLUS
# closure REPMN
# atom
#
# atom:
# <normal character>
# <multibyte character>
# ANYCHAR
# MBCSET
# CSET
# BACKREF
# BEGLINE
# ENDLINE
# BEGWORD
# ENDWORD
# LIMWORD
# NOTLIMWORD
# CRANGE
# LPAREN regexp RPAREN
# <empty>
#
# The parser builds a parse tree in postfix form in an array of tokens.
module Hopcroft
module Regex
grammar TreetopRegex
rule regex
branch regex_prime <SyntaxNodes::Regex>
end
rule regex_prime
OR branch subexpression:regex_prime <SyntaxNodes::Alternation> /
epsilon
end
rule branch
closure branch_prime <SyntaxNodes::Branch>
end
rule branch_prime
closure branch_prime <SyntaxNodes::Concatenation> /
epsilon
end
rule epsilon
"" <SyntaxNodes::Epsilon>
end
rule closure
atom closure_prime <SyntaxNodes::Closure>
end
rule closure_prime
kleen_star /
one_or_more_expr /
optional_expr /
epsilon
end
rule kleen_star
"*" <SyntaxNodes::KleenStar>
end
rule one_or_more_expr
"+" <SyntaxNodes::OneOrMoreExpression>
end
rule optional_expr
"?" <SyntaxNodes::OptionalExpression>
end
rule atom
parenthesized_expression /
dot /
character_class /
- single_char <SyntaxNodes::Char>
+ single_char
end
rule dot
"." <SyntaxNodes::Dot>
end
rule parenthesized_expression
LEFT_PARENS regex RIGHT_PARENS <SyntaxNodes::ParenthesizedExpression>
end
rule character_class
LEFT_BRACKET inner_char_class RIGHT_BRACKET <SyntaxNodes::CharacterClass>
end
rule inner_char_class
inner_char_class_expr+ <SyntaxNodes::MultipleInnerCharClass>
end
rule inner_char_class_expr
one:single_char "-" two:single_char <SyntaxNodes::TwoCharClass> /
single_char <SyntaxNodes::OneCharClass>
end
rule single_char
non_special_char / escaped_char
end
rule non_special_char
- !("(" / ")" / "[" / "+" / "?" / "+" / "]" / "|" / "*" / "\\") ANY_CHAR
+ !("(" / ")" / "[" / "+" / "?" / "+" / "]" / "|" / "*" / "\\") any_char:ANY_CHAR <SyntaxNodes::Char>
end
rule escaped_char
- ESCAPE any_char:ANY_CHAR
+ ESCAPE any_char:ANY_CHAR <SyntaxNodes::Char>
end
rule OR
"|"
end
rule ANY_CHAR
.
end
rule LEFT_BRACKET
"["
end
rule RIGHT_BRACKET
"]"
end
rule ESCAPE
"\\"
end
rule LEFT_PARENS
"("
end
rule RIGHT_PARENS
")"
end
end
end
end
diff --git a/lib/hopcroft/regex/syntax_nodes.rb b/lib/hopcroft/regex/syntax_nodes.rb
index 18d1678..0f16f7f 100644
--- a/lib/hopcroft/regex/syntax_nodes.rb
+++ b/lib/hopcroft/regex/syntax_nodes.rb
@@ -1,125 +1,125 @@
module Hopcroft
module Regex
module SyntaxNodes
class Base < ::Treetop::Runtime::SyntaxNode; end
class Regex < Base
def eval
if tuple = regex_prime.eval
tuple.first.new(branch.eval, tuple.last)
else
branch.eval
end
end
end
module Char
def eval
- Hopcroft::Regex::Char.new(text_value)
+ Hopcroft::Regex::Char.new(any_char.text_value)
end
end
class Branch < Base
def eval
if branch_prime.eval
closure.eval + branch_prime.eval
else
closure.eval
end
end
end
class Alternation < Base
def eval
if sub = subexpression.eval
subexpression = sub.first.new(branch.eval, sub.last)
[Hopcroft::Regex::Alternation, subexpression]
else
[Hopcroft::Regex::Alternation, branch.eval]
end
end
end
class Concatenation < Base
def eval
if other = branch_prime.eval
closure.eval + branch_prime.eval
else
closure.eval
end
end
end
class Closure < Base
def eval
if closure_prime.eval
closure_prime.eval.new(atom.eval)
else
atom.eval
end
end
end
class KleenStar < Base
def eval
Hopcroft::Regex::KleenStar
end
end
class OptionalExpression < Base
def eval
Hopcroft::Regex::OptionalSymbol
end
end
class OneOrMoreExpression < Base
def eval
Hopcroft::Regex::Plus
end
end
class CharacterClass < Base
def eval
Hopcroft::Regex::CharacterClass.new(*inner_char_class.eval)
end
end
class MultipleInnerCharClass < Base
def eval
elements.map { |e| e.eval }
end
end
class TwoCharClass < Base
def eval
"#{one.text_value}-#{two.text_value}"
end
end
module OneCharClass
def eval
text_value
end
end
class Epsilon < Base
def eval
nil
end
end
class ParenthesizedExpression < Base
def eval
regex.eval
end
end
class Dot < Base
def eval
Hopcroft::Regex::Dot.new
end
end
end
end
end
diff --git a/spec/hopcoft/regex/parser_spec.rb b/spec/hopcoft/regex/parser_spec.rb
index e5b404e..cebe9c2 100644
--- a/spec/hopcoft/regex/parser_spec.rb
+++ b/spec/hopcoft/regex/parser_spec.rb
@@ -1,326 +1,322 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Regex
describe Parser do
it "should parse 'a' as a Char" do
Parser.parse("a", true).should == Char.new('a')
end
it "should parse '(a)' as a Char" do
Parser.parse("(a)").should == Char.new("a")
end
it "should parse 'b' as a Char" do
Parser.parse("b").should == Char.new("b")
end
it "should parse 'A' as a Char" do
Parser.parse("A").should == Char.new("A")
end
it "should parse 'Z' as a Char" do
Parser.parse("Z").should == Char.new("Z")
end
it "should parse '0' as a Char" do
Parser.parse("0").should == Char.new("0")
end
it "should parse '9' as a Char" do
Parser.parse("9").should == Char.new("9")
end
it "should parse 'ab' as two chars" do
result = Parser.parse("ab")
result.should == (Char.new("a") + Char.new("b"))
result.should be_a_kind_of(Concatenation)
end
it "should parse 'bc' as two chars" do
result = Parser.parse("bc")
result.should == (Char.new("b") + Char.new("c"))
result.should be_a_kind_of(Concatenation)
end
it "should parse a '.' as a Dot" do
Parser.parse(".").should == Dot.new
end
it "should parse a '\.' as a char dot" do
Parser.parse('\.').should == Char.new(".")
end
it "should parse '\..' as an escaped char + a dot" do
Parser.parse("\\..").should == (Char.new(".") + Dot.new)
end
it "should parse 'a*' as a kleen star" do
Parser.parse("a*").should == KleenStar.new(Char.new("a"))
end
it "should parse 'b*' as a kleen star" do
Parser.parse("b*").should == KleenStar.new(Char.new("b"))
end
it "should parse '\*' as the star char" do
Parser.parse("\\*").should == Char.new("*")
end
it "should parse 'a\*' as a followed by a char" do
Parser.parse("a\\*").should == (Char.new("a") + Char.new("*"))
end
it "should parse a? as an optional a" do
Parser.parse("a?").should == OptionalSymbol.new(Char.new("a"))
end
it "should parse b? as an optional b" do
Parser.parse("b?").should == OptionalSymbol.new(Char.new("b"))
end
it "should parse an escaped question mark as a char" do
Parser.parse("\\?").should == Char.new("?")
end
it "should parse a plus" do
Parser.parse("a+").should == Plus.new(Char.new("a"))
end
it "should parse 'b+'" do
Parser.parse("b+").should == Plus.new(Char.new("b"))
end
it "should parse an escaped plus" do
Parser.parse("\\+").should == Char.new("+")
end
it "should parse [a-z] as a character class" do
Parser.parse("[a-z]").should == CharacterClass.new("a-z")
end
it "should parse [b-c] as a character class" do
Parser.parse("[b-c]").should == CharacterClass.new("b-c")
end
it "should parse \ as an open bracket char" do
Parser.parse("\\[").should == Char.new("[")
end
it "should parse \] as a closed bracket char" do
Parser.parse("\\]").should == Char.new("]")
end
it "should parse 'ab' as a concatenation of a and b" do
char1 = Char.new("a")
char2 = Char.new("b")
Parser.parse("ab").should == Concatenation.new(char1, char2)
Parser.parse("ab").should be_a_kind_of(Concatenation)
end
it "should parse [a-z]* as a kleen star of a char class" do
Parser.parse("[a-z]*").should == KleenStar.new(CharacterClass.new("a-z"))
end
it "should parse alternation" do
result = Parser.parse("a|b")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("a"), Char.new("b"))
end
it "should parse correct chars in the alternation" do
result = Parser.parse("x|y")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("x"), Char.new("y"))
end
it "should parse '.|a' as an alternation" do
result = Parser.parse(".|a")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, Char.new("a"))
end
it "should allow a char class in the second position" do
result = Parser.parse(".|[a-z]")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, CharacterClass.new("a-z"))
result.expressions.last.should be_a_kind_of(CharacterClass)
end
it "should allow a plus after a char class" do
result = Parser.parse("[a-z]+")
result.should be_a_kind_of(Plus)
result.should == Plus.new(CharacterClass.new("a-z"))
end
it "should see an escaped plus as a char" do
Parser.parse('\+').should be_a_kind_of(Char)
end
it "should see an escaped plus with a argment in front of it as an escaped plus with a concatenation" do
result = Parser.parse('a\+')
result.should == Concatenation.new(Char.new("a"), Char.new("+"))
end
it "should allow an optional char class" do
result = Parser.parse("[a-z]?")
result.should == OptionalSymbol.new(CharacterClass.new("a-z"))
end
it "should parse with parens" do
result = Parser.parse("([a-z])")
result.should be_a_kind_of(CharacterClass)
end
it "should parse an escaped paren inside parens" do
- pending do
- result = Parser.parse('(\()')
- result.should == Char.new("(")
- end
+ result = Parser.parse('(\()')
+ result.should == Char.new("(")
end
it "should allow parens around a concatenation" do
result = Parser.parse("(ab)")
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse matching escaped parens inside a set of parens" do
- pending do
- result = Parser.parse '(\(\))'
- result.should == (Char.new("(") + Char.new(")"))
- end
+ result = Parser.parse '(\(\))'
+ result.should == (Char.new("(") + Char.new(")"))
end
it "should parse two sets of parens around each other" do
result = Parser.parse "((ab))"
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse a number" do
result = Parser.parse("9")
result.should == Char.new("9")
end
it "should parse any single non-special char (one that isn't in the regex set)" do
result = Parser.parse("$")
result.should == Char.new("$")
end
it "should parse an escaped or" do
result = Parser.parse('\|')
result.should == Char.new("|")
end
it "should parse an underscore" do
result = Parser.parse("_")
result.should == Char.new("_")
end
it "should parse a char class with one element" do
result = Parser.parse("[a]")
result.should == Char.new("a")
end
it "should parse a char class with one element but a different char" do
result = Parser.parse("[b]")
result.should == Char.new("b")
end
it "should parse an escaped special char inside a character class" do
result = Parser.parse('[\+]')
result.should be_a_kind_of(Char)
result.should == Char.new("+")
end
it "should parse two escaped chars within a char range" do
result = Parser.parse '[\a-\b]'
result.should be_a_kind_of(CharacterClass)
result.should == CharacterClass.new("\\a-\\b")
end
it "should NOT parse an empty char class" do
lambda {
Parser.parse("[]")
}.should raise_error(Parser::ParseError)
end
["+", "?", "*", "[", "]", "\\", "|"].each do |char|
it "should not parse the regex '#{char}'" do
lambda {
Parser.parse("#{char}")
}.should raise_error(Parser::ParseError)
end
end
it "should raise an error if it cannot parse a string" do
lambda {
Parser.parse("[")
}.should raise_error(Parser::ParseError, "could not parse the regex '['")
end
it "should use the correct string name" do
lambda {
Parser.parse("]")
}.should raise_error(Parser::ParseError, "could not parse the regex ']'")
end
it "should allow multiple expressions inside a char class (i.e [a-zA-Z])" do
result = Parser.parse("[a-zA-Z]")
result.should be_a_kind_of(CharacterClass)
end
it "should be able to parse multiple ORs (a|b|c)" do
result = Parser.parse("a|b|c")
result.should == Alternation.new(Char.new("a"), Alternation.new(Char.new("b"), Char.new("c")))
end
it "should be able to parse (a|b)+" do
result = Parser.parse("(a|b)+")
result.should be_a_kind_of(Plus)
end
it "should be able to parse (a|b+)x" do
result = Parser.parse("(a|b+)x")
result.should be_a_kind_of(Concatenation)
end
it "should be able to parse (a|b)+x" do
result = Parser.parse("(a|b)+x")
result.should be_a_kind_of(Concatenation)
end
it "should be able to parse (a)" do
result = Parser.parse("(a)")
result.should be_a_kind_of(Char)
end
it "should be able to parse 'a+b+'" do
result = Parser.parse("a+b+", true)
result.should be_a_kind_of(Concatenation)
end
it "should be able to parse 'a+b+c+'" do
result = Parser.parse("a+b+c+")
result.should be_a_kind_of(Concatenation)
end
it "should parse an eval 'a+b+c+" do
result = Parser.parse("a+b+c+")
result.should == (Plus.new(Char.new("a")) + Plus.new(Char.new("b")) + Plus.new(Char.new("c")))
end
describe "debugging info" do
it "should have debugging info off by default" do
Parser.new.should_not be_debugging
end
it "should be able to set debugging information" do
p = Parser.new
p.debug = true
p.should be_debugging
end
end
end
end
end
|
smtlaissezfaire/hopcroft
|
53cad6f57a2deabdd4402e5a2f3916f89114f23c
|
Replace regex grammar with the grammar from grep (but use left-factorization so that it does not recurse infinitely). Add comments to parser.
|
diff --git a/lib/hopcroft/regex/regex_parser.treetop b/lib/hopcroft/regex/regex_parser.treetop
index b6ccb7c..66bced1 100644
--- a/lib/hopcroft/regex/regex_parser.treetop
+++ b/lib/hopcroft/regex/regex_parser.treetop
@@ -1,95 +1,168 @@
+#
+# This grammar is taken from GNU's grep grammar, with slight modifications. It doesn't
+# support backreferencing, metachars, negated character classes, repetion
+# with {n,m}, etc. - although there is no reason that it couldn't.
+#
+# In addition, GNU's grep grammar is modified for an LL parser. LL parsers can't
+# process left recursion without under going left factorization:
+#
+# See:
+# http://treetop.rubyforge.org/pitfalls_and_advanced_techniques.html
+# http://en.wikipedia.org/wiki/Left_recursion#Removing_immediate_left_recursion
+#
+#
+# From GNU grep:
+# The grammar understood by the parser is as follows.
+#
+# regexp:
+# regexp OR branch
+# branch
+#
+# branch:
+# branch closure
+# closure
+#
+# closure:
+# closure QMARK
+# closure STAR
+# closure PLUS
+# closure REPMN
+# atom
+#
+# atom:
+# <normal character>
+# <multibyte character>
+# ANYCHAR
+# MBCSET
+# CSET
+# BACKREF
+# BEGLINE
+# ENDLINE
+# BEGWORD
+# ENDWORD
+# LIMWORD
+# NOTLIMWORD
+# CRANGE
+# LPAREN regexp RPAREN
+# <empty>
+#
+# The parser builds a parse tree in postfix form in an array of tokens.
+
module Hopcroft
module Regex
grammar TreetopRegex
- # expr -> expr "*"
- # -> expr "+"
- # -> expr expr # concatenation
- # -> "(" expr ")"
- # -> /[a-zA-Z]+/
- #
- # after left factorization:
- #
- # expr -> "(" expr-prime ")"
- # -> /[az-A-Z]/ expr-prime
- #
- # expr-prime -> ""
- # -> "*"
- # -> "+"
- # -> expr
- #
- #
- # See http://en.wikipedia.org/wiki/Left_recursion#Removing_immediate_left_recursion
-
rule regex
- LEFT_PARENS regex RIGHT_PARENS subexpression <SyntaxNodes::ParenthesizedSubexpression> /
- left_factored_expression subexpression <SyntaxNodes::LeftFactoredExpression>
+ branch regex_prime <SyntaxNodes::Regex>
end
- rule subexpression
- "*" <SyntaxNodes::KleenStar> /
- "+" <SyntaxNodes::Plus> /
- "?" <SyntaxNodes::OptionalSymbol> /
- "|" regex <SyntaxNodes::Alternation> /
- regex <SyntaxNodes::Concatenation> /
- "" <SyntaxNodes::Epsilon>
+ rule regex_prime
+ OR branch subexpression:regex_prime <SyntaxNodes::Alternation> /
+ epsilon
end
- rule left_factored_expression
- dot / character_class / single_char
+ rule branch
+ closure branch_prime <SyntaxNodes::Branch>
+ end
+
+ rule branch_prime
+ closure branch_prime <SyntaxNodes::Concatenation> /
+ epsilon
end
- rule character_class
- LEFT_BRACKET inner_char_class RIGHT_BRACKET <SyntaxNodes::CharClass>
+ rule epsilon
+ "" <SyntaxNodes::Epsilon>
end
- rule inner_char_class
- inner_char_class_expr+ <SyntaxNodes::MultipleInnerCharClassExpressions>
+ rule closure
+ atom closure_prime <SyntaxNodes::Closure>
end
- rule inner_char_class_expr
- one:single_char "-" two:single_char <SyntaxNodes::TwoCharClass> /
- single_char <SyntaxNodes::OneCharCharClass>
+ rule closure_prime
+ kleen_star /
+ one_or_more_expr /
+ optional_expr /
+ epsilon
+ end
+
+ rule kleen_star
+ "*" <SyntaxNodes::KleenStar>
+ end
+
+ rule one_or_more_expr
+ "+" <SyntaxNodes::OneOrMoreExpression>
+ end
+
+ rule optional_expr
+ "?" <SyntaxNodes::OptionalExpression>
+ end
+
+ rule atom
+ parenthesized_expression /
+ dot /
+ character_class /
+ single_char <SyntaxNodes::Char>
end
rule dot
"." <SyntaxNodes::Dot>
end
+ rule parenthesized_expression
+ LEFT_PARENS regex RIGHT_PARENS <SyntaxNodes::ParenthesizedExpression>
+ end
+
+ rule character_class
+ LEFT_BRACKET inner_char_class RIGHT_BRACKET <SyntaxNodes::CharacterClass>
+ end
+
+ rule inner_char_class
+ inner_char_class_expr+ <SyntaxNodes::MultipleInnerCharClass>
+ end
+
+ rule inner_char_class_expr
+ one:single_char "-" two:single_char <SyntaxNodes::TwoCharClass> /
+ single_char <SyntaxNodes::OneCharClass>
+ end
+
rule single_char
non_special_char / escaped_char
end
rule non_special_char
- !("(" / ")" / "[" / "+" / "?" / "+" / "]" / "|" / "*" / "\\") ANY_CHAR <SyntaxNodes::NonSpecialChar>
+ !("(" / ")" / "[" / "+" / "?" / "+" / "]" / "|" / "*" / "\\") ANY_CHAR
end
rule escaped_char
- ESCAPE any_char:ANY_CHAR <SyntaxNodes::EscapedChar>
+ ESCAPE any_char:ANY_CHAR
+ end
+
+ rule OR
+ "|"
end
rule ANY_CHAR
.
end
rule LEFT_BRACKET
"["
end
rule RIGHT_BRACKET
"]"
end
rule ESCAPE
"\\"
end
rule LEFT_PARENS
"("
end
rule RIGHT_PARENS
")"
end
end
end
end
diff --git a/lib/hopcroft/regex/syntax_nodes.rb b/lib/hopcroft/regex/syntax_nodes.rb
index 2e28a20..18d1678 100644
--- a/lib/hopcroft/regex/syntax_nodes.rb
+++ b/lib/hopcroft/regex/syntax_nodes.rb
@@ -1,144 +1,125 @@
module Hopcroft
module Regex
module SyntaxNodes
class Base < ::Treetop::Runtime::SyntaxNode; end
- class MultiExpression < Base
+ class Regex < Base
def eval
- second_expression.call(first_expression)
+ if tuple = regex_prime.eval
+ tuple.first.new(branch.eval, tuple.last)
+ else
+ branch.eval
+ end
end
end
- class LeftFactoredExpression < MultiExpression
- def first_expression
- @first_expression ||= left_factored_expression.eval.call(left_factored_expression)
- end
-
- def second_expression
- @second_expression ||= subexpression.eval
+ module Char
+ def eval
+ Hopcroft::Regex::Char.new(text_value)
end
end
- class ParenthesizedSubexpression < MultiExpression
- def first_expression
- @first_expression ||= regex.eval
- end
-
- def second_expression
- @second_expression ||= subexpression.eval
+ class Branch < Base
+ def eval
+ if branch_prime.eval
+ closure.eval + branch_prime.eval
+ else
+ closure.eval
+ end
end
end
- module Concatenation
- def eval
- lambda do |obj|
- subexpressions = elements.map { |e| e.eval }.compact
-
- if subexpressions.any?
- Regex::Concatenation.new(obj, subexpressions.first.call(self))
- else
- obj
- end
+ class Alternation < Base
+ def eval
+ if sub = subexpression.eval
+ subexpression = sub.first.new(branch.eval, sub.last)
+ [Hopcroft::Regex::Alternation, subexpression]
+ else
+ [Hopcroft::Regex::Alternation, branch.eval]
end
end
end
-
- class Plus < Base
+
+ class Concatenation < Base
def eval
- lambda do |obj|
- Hopcroft::Regex::Plus.new(obj)
+ if other = branch_prime.eval
+ closure.eval + branch_prime.eval
+ else
+ closure.eval
end
end
end
- class KleenStar < Base
+ class Closure < Base
def eval
- lambda do |obj|
- Hopcroft::Regex::KleenStar.new(obj)
+ if closure_prime.eval
+ closure_prime.eval.new(atom.eval)
+ else
+ atom.eval
end
end
end
- class OptionalSymbol < Base
+ class KleenStar < Base
def eval
- lambda do |obj|
- Hopcroft::Regex::OptionalSymbol.new(obj)
- end
+ Hopcroft::Regex::KleenStar
end
end
- class Epsilon < Base
+ class OptionalExpression < Base
def eval
- lambda do |obj|
- obj
- end
+ Hopcroft::Regex::OptionalSymbol
end
end
- class Alternation < Base
+ class OneOrMoreExpression < Base
def eval
- lambda do |obj|
- Regex::Alternation.new(obj, regex.eval)
- end
+ Hopcroft::Regex::Plus
end
end
- class Dot < Base
+ class CharacterClass < Base
def eval
- lambda do |obj|
- Regex::Dot.new
- end
+ Hopcroft::Regex::CharacterClass.new(*inner_char_class.eval)
end
end
- module NonSpecialChar
+ class MultipleInnerCharClass < Base
def eval
- lambda do |obj|
- Char.new(obj.text_value)
- end
+ elements.map { |e| e.eval }
end
end
- class CharClass < Base
+ class TwoCharClass < Base
def eval
- lambda do
- CharacterClass.new(*inner_char_class.eval.call)
- end
+ "#{one.text_value}-#{two.text_value}"
end
end
- class MultipleInnerCharClassExpressions < Base
+ module OneCharClass
def eval
- lambda do
- elements.map { |e| e.eval.call }
- end
+ text_value
end
end
- class TwoCharClass < Base
+ class Epsilon < Base
def eval
- lambda do
- "#{one.text_value}-#{two.text_value}"
- end
+ nil
end
end
- module OneCharCharClass
+ class ParenthesizedExpression < Base
def eval
- lambda do
- text_value
- end
+ regex.eval
end
end
- class EscapedChar < Base
+ class Dot < Base
def eval
- lambda do |obj|
- Char.new(any_char.text_value)
- end
+ Hopcroft::Regex::Dot.new
end
end
end
end
end
-
\ No newline at end of file
+
diff --git a/spec/hopcoft/regex/parser_spec.rb b/spec/hopcoft/regex/parser_spec.rb
index 76e0577..e5b404e 100644
--- a/spec/hopcoft/regex/parser_spec.rb
+++ b/spec/hopcoft/regex/parser_spec.rb
@@ -1,316 +1,326 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Regex
describe Parser do
it "should parse 'a' as a Char" do
- Parser.parse("a").should == Char.new('a')
+ Parser.parse("a", true).should == Char.new('a')
end
it "should parse '(a)' as a Char" do
Parser.parse("(a)").should == Char.new("a")
end
-
+
it "should parse 'b' as a Char" do
Parser.parse("b").should == Char.new("b")
end
it "should parse 'A' as a Char" do
Parser.parse("A").should == Char.new("A")
end
it "should parse 'Z' as a Char" do
Parser.parse("Z").should == Char.new("Z")
end
it "should parse '0' as a Char" do
Parser.parse("0").should == Char.new("0")
end
it "should parse '9' as a Char" do
Parser.parse("9").should == Char.new("9")
end
it "should parse 'ab' as two chars" do
result = Parser.parse("ab")
result.should == (Char.new("a") + Char.new("b"))
result.should be_a_kind_of(Concatenation)
end
it "should parse 'bc' as two chars" do
result = Parser.parse("bc")
result.should == (Char.new("b") + Char.new("c"))
result.should be_a_kind_of(Concatenation)
end
it "should parse a '.' as a Dot" do
Parser.parse(".").should == Dot.new
end
it "should parse a '\.' as a char dot" do
Parser.parse('\.').should == Char.new(".")
end
it "should parse '\..' as an escaped char + a dot" do
Parser.parse("\\..").should == (Char.new(".") + Dot.new)
end
it "should parse 'a*' as a kleen star" do
Parser.parse("a*").should == KleenStar.new(Char.new("a"))
end
it "should parse 'b*' as a kleen star" do
Parser.parse("b*").should == KleenStar.new(Char.new("b"))
end
it "should parse '\*' as the star char" do
Parser.parse("\\*").should == Char.new("*")
end
it "should parse 'a\*' as a followed by a char" do
Parser.parse("a\\*").should == (Char.new("a") + Char.new("*"))
end
it "should parse a? as an optional a" do
Parser.parse("a?").should == OptionalSymbol.new(Char.new("a"))
end
it "should parse b? as an optional b" do
Parser.parse("b?").should == OptionalSymbol.new(Char.new("b"))
end
it "should parse an escaped question mark as a char" do
Parser.parse("\\?").should == Char.new("?")
end
it "should parse a plus" do
Parser.parse("a+").should == Plus.new(Char.new("a"))
end
it "should parse 'b+'" do
Parser.parse("b+").should == Plus.new(Char.new("b"))
end
it "should parse an escaped plus" do
Parser.parse("\\+").should == Char.new("+")
end
it "should parse [a-z] as a character class" do
Parser.parse("[a-z]").should == CharacterClass.new("a-z")
end
it "should parse [b-c] as a character class" do
Parser.parse("[b-c]").should == CharacterClass.new("b-c")
end
it "should parse \ as an open bracket char" do
Parser.parse("\\[").should == Char.new("[")
end
it "should parse \] as a closed bracket char" do
Parser.parse("\\]").should == Char.new("]")
end
it "should parse 'ab' as a concatenation of a and b" do
char1 = Char.new("a")
char2 = Char.new("b")
Parser.parse("ab").should == Concatenation.new(char1, char2)
Parser.parse("ab").should be_a_kind_of(Concatenation)
end
it "should parse [a-z]* as a kleen star of a char class" do
Parser.parse("[a-z]*").should == KleenStar.new(CharacterClass.new("a-z"))
end
it "should parse alternation" do
result = Parser.parse("a|b")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("a"), Char.new("b"))
end
it "should parse correct chars in the alternation" do
result = Parser.parse("x|y")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("x"), Char.new("y"))
end
it "should parse '.|a' as an alternation" do
result = Parser.parse(".|a")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, Char.new("a"))
end
it "should allow a char class in the second position" do
result = Parser.parse(".|[a-z]")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, CharacterClass.new("a-z"))
result.expressions.last.should be_a_kind_of(CharacterClass)
end
it "should allow a plus after a char class" do
result = Parser.parse("[a-z]+")
result.should be_a_kind_of(Plus)
result.should == Plus.new(CharacterClass.new("a-z"))
end
it "should see an escaped plus as a char" do
Parser.parse('\+').should be_a_kind_of(Char)
end
it "should see an escaped plus with a argment in front of it as an escaped plus with a concatenation" do
result = Parser.parse('a\+')
result.should == Concatenation.new(Char.new("a"), Char.new("+"))
end
it "should allow an optional char class" do
result = Parser.parse("[a-z]?")
result.should == OptionalSymbol.new(CharacterClass.new("a-z"))
end
it "should parse with parens" do
result = Parser.parse("([a-z])")
result.should be_a_kind_of(CharacterClass)
end
it "should parse an escaped paren inside parens" do
- result = Parser.parse('(\()')
- result.should == Char.new("(")
+ pending do
+ result = Parser.parse('(\()')
+ result.should == Char.new("(")
+ end
end
it "should allow parens around a concatenation" do
result = Parser.parse("(ab)")
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse matching escaped parens inside a set of parens" do
- result = Parser.parse '(\(\))'
- result.should == (Char.new("(") + Char.new(")"))
+ pending do
+ result = Parser.parse '(\(\))'
+ result.should == (Char.new("(") + Char.new(")"))
+ end
end
it "should parse two sets of parens around each other" do
result = Parser.parse "((ab))"
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse a number" do
result = Parser.parse("9")
result.should == Char.new("9")
end
it "should parse any single non-special char (one that isn't in the regex set)" do
result = Parser.parse("$")
result.should == Char.new("$")
end
it "should parse an escaped or" do
result = Parser.parse('\|')
result.should == Char.new("|")
end
it "should parse an underscore" do
result = Parser.parse("_")
result.should == Char.new("_")
end
it "should parse a char class with one element" do
result = Parser.parse("[a]")
result.should == Char.new("a")
end
it "should parse a char class with one element but a different char" do
result = Parser.parse("[b]")
result.should == Char.new("b")
end
it "should parse an escaped special char inside a character class" do
result = Parser.parse('[\+]')
result.should be_a_kind_of(Char)
result.should == Char.new("+")
end
it "should parse two escaped chars within a char range" do
result = Parser.parse '[\a-\b]'
result.should be_a_kind_of(CharacterClass)
result.should == CharacterClass.new("\\a-\\b")
end
it "should NOT parse an empty char class" do
lambda {
Parser.parse("[]")
}.should raise_error(Parser::ParseError)
end
["+", "?", "*", "[", "]", "\\", "|"].each do |char|
it "should not parse the regex '#{char}'" do
lambda {
Parser.parse("#{char}")
}.should raise_error(Parser::ParseError)
end
end
it "should raise an error if it cannot parse a string" do
lambda {
Parser.parse("[")
}.should raise_error(Parser::ParseError, "could not parse the regex '['")
end
it "should use the correct string name" do
lambda {
Parser.parse("]")
}.should raise_error(Parser::ParseError, "could not parse the regex ']'")
end
it "should allow multiple expressions inside a char class (i.e [a-zA-Z])" do
result = Parser.parse("[a-zA-Z]")
result.should be_a_kind_of(CharacterClass)
end
it "should be able to parse multiple ORs (a|b|c)" do
result = Parser.parse("a|b|c")
result.should == Alternation.new(Char.new("a"), Alternation.new(Char.new("b"), Char.new("c")))
end
it "should be able to parse (a|b)+" do
result = Parser.parse("(a|b)+")
result.should be_a_kind_of(Plus)
end
it "should be able to parse (a|b+)x" do
result = Parser.parse("(a|b+)x")
result.should be_a_kind_of(Concatenation)
end
it "should be able to parse (a|b)+x" do
- pending do
- result = Parser.parse("(a|b)+x")
- result.should be_a_kind_of(Concatenation)
- end
+ result = Parser.parse("(a|b)+x")
+ result.should be_a_kind_of(Concatenation)
end
it "should be able to parse (a)" do
result = Parser.parse("(a)")
result.should be_a_kind_of(Char)
end
it "should be able to parse 'a+b+'" do
- pending do
- result = Parser.parse("a+b+")
- result.should be_a_kind_of(Concatenation)
- end
+ result = Parser.parse("a+b+", true)
+ result.should be_a_kind_of(Concatenation)
end
-
+
+ it "should be able to parse 'a+b+c+'" do
+ result = Parser.parse("a+b+c+")
+ result.should be_a_kind_of(Concatenation)
+ end
+
+ it "should parse an eval 'a+b+c+" do
+ result = Parser.parse("a+b+c+")
+ result.should == (Plus.new(Char.new("a")) + Plus.new(Char.new("b")) + Plus.new(Char.new("c")))
+ end
+
describe "debugging info" do
it "should have debugging info off by default" do
Parser.new.should_not be_debugging
end
it "should be able to set debugging information" do
p = Parser.new
p.debug = true
p.should be_debugging
end
end
end
end
end
|
smtlaissezfaire/hopcroft
|
1be1a6995c60b5107fee2a236ee49923b64c976a
|
Use autoload instead of require
|
diff --git a/lib/hopcroft.rb b/lib/hopcroft.rb
index 1b0b77c..926c2cb 100644
--- a/lib/hopcroft.rb
+++ b/lib/hopcroft.rb
@@ -1,9 +1,11 @@
require "using"
require "facets/kernel/returning"
module Hopcroft
extend Using
+
+ Using.default_load_scheme = :autoload
using :Regex
using :Machine
end
|
smtlaissezfaire/hopcroft
|
0646215c414675f96589a326f0727a3927112122
|
Turn off debugging information
|
diff --git a/spec/hopcoft/regex/parser_spec.rb b/spec/hopcoft/regex/parser_spec.rb
index 6073c10..76e0577 100644
--- a/spec/hopcoft/regex/parser_spec.rb
+++ b/spec/hopcoft/regex/parser_spec.rb
@@ -1,316 +1,316 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Regex
describe Parser do
it "should parse 'a' as a Char" do
Parser.parse("a").should == Char.new('a')
end
it "should parse '(a)' as a Char" do
Parser.parse("(a)").should == Char.new("a")
end
it "should parse 'b' as a Char" do
Parser.parse("b").should == Char.new("b")
end
it "should parse 'A' as a Char" do
Parser.parse("A").should == Char.new("A")
end
it "should parse 'Z' as a Char" do
Parser.parse("Z").should == Char.new("Z")
end
it "should parse '0' as a Char" do
Parser.parse("0").should == Char.new("0")
end
it "should parse '9' as a Char" do
Parser.parse("9").should == Char.new("9")
end
it "should parse 'ab' as two chars" do
result = Parser.parse("ab")
result.should == (Char.new("a") + Char.new("b"))
result.should be_a_kind_of(Concatenation)
end
it "should parse 'bc' as two chars" do
result = Parser.parse("bc")
result.should == (Char.new("b") + Char.new("c"))
result.should be_a_kind_of(Concatenation)
end
it "should parse a '.' as a Dot" do
Parser.parse(".").should == Dot.new
end
it "should parse a '\.' as a char dot" do
Parser.parse('\.').should == Char.new(".")
end
it "should parse '\..' as an escaped char + a dot" do
Parser.parse("\\..").should == (Char.new(".") + Dot.new)
end
it "should parse 'a*' as a kleen star" do
Parser.parse("a*").should == KleenStar.new(Char.new("a"))
end
it "should parse 'b*' as a kleen star" do
Parser.parse("b*").should == KleenStar.new(Char.new("b"))
end
it "should parse '\*' as the star char" do
Parser.parse("\\*").should == Char.new("*")
end
it "should parse 'a\*' as a followed by a char" do
Parser.parse("a\\*").should == (Char.new("a") + Char.new("*"))
end
it "should parse a? as an optional a" do
Parser.parse("a?").should == OptionalSymbol.new(Char.new("a"))
end
it "should parse b? as an optional b" do
Parser.parse("b?").should == OptionalSymbol.new(Char.new("b"))
end
it "should parse an escaped question mark as a char" do
Parser.parse("\\?").should == Char.new("?")
end
it "should parse a plus" do
Parser.parse("a+").should == Plus.new(Char.new("a"))
end
it "should parse 'b+'" do
Parser.parse("b+").should == Plus.new(Char.new("b"))
end
it "should parse an escaped plus" do
Parser.parse("\\+").should == Char.new("+")
end
it "should parse [a-z] as a character class" do
Parser.parse("[a-z]").should == CharacterClass.new("a-z")
end
it "should parse [b-c] as a character class" do
Parser.parse("[b-c]").should == CharacterClass.new("b-c")
end
it "should parse \ as an open bracket char" do
Parser.parse("\\[").should == Char.new("[")
end
it "should parse \] as a closed bracket char" do
Parser.parse("\\]").should == Char.new("]")
end
it "should parse 'ab' as a concatenation of a and b" do
char1 = Char.new("a")
char2 = Char.new("b")
Parser.parse("ab").should == Concatenation.new(char1, char2)
Parser.parse("ab").should be_a_kind_of(Concatenation)
end
it "should parse [a-z]* as a kleen star of a char class" do
Parser.parse("[a-z]*").should == KleenStar.new(CharacterClass.new("a-z"))
end
it "should parse alternation" do
result = Parser.parse("a|b")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("a"), Char.new("b"))
end
it "should parse correct chars in the alternation" do
result = Parser.parse("x|y")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("x"), Char.new("y"))
end
it "should parse '.|a' as an alternation" do
result = Parser.parse(".|a")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, Char.new("a"))
end
it "should allow a char class in the second position" do
result = Parser.parse(".|[a-z]")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, CharacterClass.new("a-z"))
result.expressions.last.should be_a_kind_of(CharacterClass)
end
it "should allow a plus after a char class" do
result = Parser.parse("[a-z]+")
result.should be_a_kind_of(Plus)
result.should == Plus.new(CharacterClass.new("a-z"))
end
it "should see an escaped plus as a char" do
Parser.parse('\+').should be_a_kind_of(Char)
end
it "should see an escaped plus with a argment in front of it as an escaped plus with a concatenation" do
result = Parser.parse('a\+')
result.should == Concatenation.new(Char.new("a"), Char.new("+"))
end
it "should allow an optional char class" do
result = Parser.parse("[a-z]?")
result.should == OptionalSymbol.new(CharacterClass.new("a-z"))
end
it "should parse with parens" do
result = Parser.parse("([a-z])")
result.should be_a_kind_of(CharacterClass)
end
it "should parse an escaped paren inside parens" do
result = Parser.parse('(\()')
result.should == Char.new("(")
end
it "should allow parens around a concatenation" do
result = Parser.parse("(ab)")
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse matching escaped parens inside a set of parens" do
result = Parser.parse '(\(\))'
result.should == (Char.new("(") + Char.new(")"))
end
it "should parse two sets of parens around each other" do
result = Parser.parse "((ab))"
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse a number" do
result = Parser.parse("9")
result.should == Char.new("9")
end
it "should parse any single non-special char (one that isn't in the regex set)" do
result = Parser.parse("$")
result.should == Char.new("$")
end
it "should parse an escaped or" do
result = Parser.parse('\|')
result.should == Char.new("|")
end
it "should parse an underscore" do
result = Parser.parse("_")
result.should == Char.new("_")
end
it "should parse a char class with one element" do
result = Parser.parse("[a]")
result.should == Char.new("a")
end
it "should parse a char class with one element but a different char" do
result = Parser.parse("[b]")
result.should == Char.new("b")
end
it "should parse an escaped special char inside a character class" do
result = Parser.parse('[\+]')
result.should be_a_kind_of(Char)
result.should == Char.new("+")
end
it "should parse two escaped chars within a char range" do
result = Parser.parse '[\a-\b]'
result.should be_a_kind_of(CharacterClass)
result.should == CharacterClass.new("\\a-\\b")
end
it "should NOT parse an empty char class" do
lambda {
Parser.parse("[]")
}.should raise_error(Parser::ParseError)
end
["+", "?", "*", "[", "]", "\\", "|"].each do |char|
it "should not parse the regex '#{char}'" do
lambda {
Parser.parse("#{char}")
}.should raise_error(Parser::ParseError)
end
end
it "should raise an error if it cannot parse a string" do
lambda {
Parser.parse("[")
}.should raise_error(Parser::ParseError, "could not parse the regex '['")
end
it "should use the correct string name" do
lambda {
Parser.parse("]")
}.should raise_error(Parser::ParseError, "could not parse the regex ']'")
end
it "should allow multiple expressions inside a char class (i.e [a-zA-Z])" do
result = Parser.parse("[a-zA-Z]")
result.should be_a_kind_of(CharacterClass)
end
it "should be able to parse multiple ORs (a|b|c)" do
result = Parser.parse("a|b|c")
result.should == Alternation.new(Char.new("a"), Alternation.new(Char.new("b"), Char.new("c")))
end
it "should be able to parse (a|b)+" do
result = Parser.parse("(a|b)+")
result.should be_a_kind_of(Plus)
end
it "should be able to parse (a|b+)x" do
result = Parser.parse("(a|b+)x")
result.should be_a_kind_of(Concatenation)
end
it "should be able to parse (a|b)+x" do
pending do
- result = Parser.parse("(a|b)+x", true)
+ result = Parser.parse("(a|b)+x")
result.should be_a_kind_of(Concatenation)
end
end
it "should be able to parse (a)" do
result = Parser.parse("(a)")
result.should be_a_kind_of(Char)
end
it "should be able to parse 'a+b+'" do
pending do
result = Parser.parse("a+b+")
result.should be_a_kind_of(Concatenation)
end
end
describe "debugging info" do
it "should have debugging info off by default" do
Parser.new.should_not be_debugging
end
it "should be able to set debugging information" do
p = Parser.new
p.debug = true
p.should be_debugging
end
end
end
end
end
|
smtlaissezfaire/hopcroft
|
21e722b7f9b01a4a11bc99f554b5573685b9cded
|
Add another pending concatenation spec
|
diff --git a/spec/hopcoft/regex/parser_spec.rb b/spec/hopcoft/regex/parser_spec.rb
index ae0be97..6073c10 100644
--- a/spec/hopcoft/regex/parser_spec.rb
+++ b/spec/hopcoft/regex/parser_spec.rb
@@ -1,309 +1,316 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Regex
describe Parser do
it "should parse 'a' as a Char" do
Parser.parse("a").should == Char.new('a')
end
it "should parse '(a)' as a Char" do
Parser.parse("(a)").should == Char.new("a")
end
it "should parse 'b' as a Char" do
Parser.parse("b").should == Char.new("b")
end
it "should parse 'A' as a Char" do
Parser.parse("A").should == Char.new("A")
end
it "should parse 'Z' as a Char" do
Parser.parse("Z").should == Char.new("Z")
end
it "should parse '0' as a Char" do
Parser.parse("0").should == Char.new("0")
end
it "should parse '9' as a Char" do
Parser.parse("9").should == Char.new("9")
end
it "should parse 'ab' as two chars" do
result = Parser.parse("ab")
result.should == (Char.new("a") + Char.new("b"))
result.should be_a_kind_of(Concatenation)
end
it "should parse 'bc' as two chars" do
result = Parser.parse("bc")
result.should == (Char.new("b") + Char.new("c"))
result.should be_a_kind_of(Concatenation)
end
it "should parse a '.' as a Dot" do
Parser.parse(".").should == Dot.new
end
it "should parse a '\.' as a char dot" do
Parser.parse('\.').should == Char.new(".")
end
it "should parse '\..' as an escaped char + a dot" do
Parser.parse("\\..").should == (Char.new(".") + Dot.new)
end
it "should parse 'a*' as a kleen star" do
Parser.parse("a*").should == KleenStar.new(Char.new("a"))
end
it "should parse 'b*' as a kleen star" do
Parser.parse("b*").should == KleenStar.new(Char.new("b"))
end
it "should parse '\*' as the star char" do
Parser.parse("\\*").should == Char.new("*")
end
it "should parse 'a\*' as a followed by a char" do
Parser.parse("a\\*").should == (Char.new("a") + Char.new("*"))
end
it "should parse a? as an optional a" do
Parser.parse("a?").should == OptionalSymbol.new(Char.new("a"))
end
it "should parse b? as an optional b" do
Parser.parse("b?").should == OptionalSymbol.new(Char.new("b"))
end
it "should parse an escaped question mark as a char" do
Parser.parse("\\?").should == Char.new("?")
end
it "should parse a plus" do
Parser.parse("a+").should == Plus.new(Char.new("a"))
end
it "should parse 'b+'" do
Parser.parse("b+").should == Plus.new(Char.new("b"))
end
it "should parse an escaped plus" do
Parser.parse("\\+").should == Char.new("+")
end
it "should parse [a-z] as a character class" do
Parser.parse("[a-z]").should == CharacterClass.new("a-z")
end
it "should parse [b-c] as a character class" do
Parser.parse("[b-c]").should == CharacterClass.new("b-c")
end
it "should parse \ as an open bracket char" do
Parser.parse("\\[").should == Char.new("[")
end
it "should parse \] as a closed bracket char" do
Parser.parse("\\]").should == Char.new("]")
end
it "should parse 'ab' as a concatenation of a and b" do
char1 = Char.new("a")
char2 = Char.new("b")
Parser.parse("ab").should == Concatenation.new(char1, char2)
Parser.parse("ab").should be_a_kind_of(Concatenation)
end
it "should parse [a-z]* as a kleen star of a char class" do
Parser.parse("[a-z]*").should == KleenStar.new(CharacterClass.new("a-z"))
end
it "should parse alternation" do
result = Parser.parse("a|b")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("a"), Char.new("b"))
end
it "should parse correct chars in the alternation" do
result = Parser.parse("x|y")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("x"), Char.new("y"))
end
it "should parse '.|a' as an alternation" do
result = Parser.parse(".|a")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, Char.new("a"))
end
it "should allow a char class in the second position" do
result = Parser.parse(".|[a-z]")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, CharacterClass.new("a-z"))
result.expressions.last.should be_a_kind_of(CharacterClass)
end
it "should allow a plus after a char class" do
result = Parser.parse("[a-z]+")
result.should be_a_kind_of(Plus)
result.should == Plus.new(CharacterClass.new("a-z"))
end
it "should see an escaped plus as a char" do
Parser.parse('\+').should be_a_kind_of(Char)
end
it "should see an escaped plus with a argment in front of it as an escaped plus with a concatenation" do
result = Parser.parse('a\+')
result.should == Concatenation.new(Char.new("a"), Char.new("+"))
end
it "should allow an optional char class" do
result = Parser.parse("[a-z]?")
result.should == OptionalSymbol.new(CharacterClass.new("a-z"))
end
it "should parse with parens" do
result = Parser.parse("([a-z])")
result.should be_a_kind_of(CharacterClass)
end
it "should parse an escaped paren inside parens" do
result = Parser.parse('(\()')
result.should == Char.new("(")
end
it "should allow parens around a concatenation" do
result = Parser.parse("(ab)")
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse matching escaped parens inside a set of parens" do
result = Parser.parse '(\(\))'
result.should == (Char.new("(") + Char.new(")"))
end
it "should parse two sets of parens around each other" do
result = Parser.parse "((ab))"
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse a number" do
result = Parser.parse("9")
result.should == Char.new("9")
end
it "should parse any single non-special char (one that isn't in the regex set)" do
result = Parser.parse("$")
result.should == Char.new("$")
end
it "should parse an escaped or" do
result = Parser.parse('\|')
result.should == Char.new("|")
end
it "should parse an underscore" do
result = Parser.parse("_")
result.should == Char.new("_")
end
it "should parse a char class with one element" do
result = Parser.parse("[a]")
result.should == Char.new("a")
end
it "should parse a char class with one element but a different char" do
result = Parser.parse("[b]")
result.should == Char.new("b")
end
it "should parse an escaped special char inside a character class" do
result = Parser.parse('[\+]')
result.should be_a_kind_of(Char)
result.should == Char.new("+")
end
it "should parse two escaped chars within a char range" do
result = Parser.parse '[\a-\b]'
result.should be_a_kind_of(CharacterClass)
result.should == CharacterClass.new("\\a-\\b")
end
it "should NOT parse an empty char class" do
lambda {
Parser.parse("[]")
}.should raise_error(Parser::ParseError)
end
["+", "?", "*", "[", "]", "\\", "|"].each do |char|
it "should not parse the regex '#{char}'" do
lambda {
Parser.parse("#{char}")
}.should raise_error(Parser::ParseError)
end
end
it "should raise an error if it cannot parse a string" do
lambda {
Parser.parse("[")
}.should raise_error(Parser::ParseError, "could not parse the regex '['")
end
it "should use the correct string name" do
lambda {
Parser.parse("]")
}.should raise_error(Parser::ParseError, "could not parse the regex ']'")
end
it "should allow multiple expressions inside a char class (i.e [a-zA-Z])" do
result = Parser.parse("[a-zA-Z]")
result.should be_a_kind_of(CharacterClass)
end
it "should be able to parse multiple ORs (a|b|c)" do
result = Parser.parse("a|b|c")
result.should == Alternation.new(Char.new("a"), Alternation.new(Char.new("b"), Char.new("c")))
end
it "should be able to parse (a|b)+" do
result = Parser.parse("(a|b)+")
result.should be_a_kind_of(Plus)
end
it "should be able to parse (a|b+)x" do
result = Parser.parse("(a|b+)x")
result.should be_a_kind_of(Concatenation)
end
it "should be able to parse (a|b)+x" do
pending do
result = Parser.parse("(a|b)+x", true)
result.should be_a_kind_of(Concatenation)
end
end
it "should be able to parse (a)" do
result = Parser.parse("(a)")
result.should be_a_kind_of(Char)
end
+ it "should be able to parse 'a+b+'" do
+ pending do
+ result = Parser.parse("a+b+")
+ result.should be_a_kind_of(Concatenation)
+ end
+ end
+
describe "debugging info" do
it "should have debugging info off by default" do
Parser.new.should_not be_debugging
end
it "should be able to set debugging information" do
p = Parser.new
p.debug = true
p.should be_debugging
end
end
end
end
end
|
smtlaissezfaire/hopcroft
|
01b7d6afb29f6243796b5b7b96ad445b4fa34d6c
|
Add some pending specs
|
diff --git a/spec/hopcoft/integration_spec.rb b/spec/hopcoft/integration_spec.rb
index d430299..5f0389e 100644
--- a/spec/hopcoft/integration_spec.rb
+++ b/spec/hopcoft/integration_spec.rb
@@ -1,151 +1,165 @@
require File.expand_path(File.dirname(__FILE__) + "/../spec_helper")
module Hopcroft
describe "Integration tests" do
describe "the regex /a/" do
before do
@regex = Regex.compile("a")
end
it "should match 'a'" do
@regex.should be_matched_by("a")
end
it "should not match 'b'" do
@regex.should_not be_matched_by("b")
end
it "should not match 'abasdfasdf'" do
@regex.should_not be_matched_by('abasdfasdf')
end
end
describe "the regex /ab/" do
before do
@regex = Regex.compile("ab")
end
it "should match 'ab'" do
@regex.should be_matched_by("ab")
end
it "should not match 'x'" do
@regex.should_not be_matched_by("x")
end
it "should not match 'ba'" do
@regex.should_not be_matched_by("ba")
end
end
describe "the regex /a*/" do
before do
@regex = Regex.compile("a*")
end
it "should be matched by 'a'" do
@regex.should be_matched_by("a")
end
it "should be matched by the empty string" do
@regex.should be_matched_by("")
end
it "should be matched by 'aa'" do
@regex.should be_matched_by("aa")
end
it "should be matched by 'aaa'" do
@regex.should be_matched_by("aaa")
end
it "should not be matched by 'aab'" do
@regex.should_not be_matched_by("aab")
end
end
describe "the regex /a+/" do
before do
@regex = Regex.compile("a+")
end
it "should be matched by 'a'" do
@regex.should be_matched_by("a")
end
it "should NOT be matched by the empty string" do
@regex.should_not be_matched_by("")
end
it "should be matched by 'aa'" do
@regex.should be_matched_by("aa")
end
it "should not be matched by 'aab'" do
@regex.should_not be_matched_by("aab")
end
it "should be matched by 'aaa'" do
@regex.matches?("aaa")
@regex.should be_matched_by("aaa")
end
end
describe "the regex /a|b/" do
before do
@regex = Regex.compile("a|b")
end
it "should be matched by an 'a'" do
@regex.should be_matched_by("a")
end
it "should be matched by a 'b'" do
@regex.should be_matched_by("b")
end
it "should not be matched by a 'c'" do
@regex.should_not be_matched_by("c")
end
it "should not be matched with the string 'ab'" do
@regex.matched_by?("ab")
@regex.should_not be_matched_by("ab")
end
end
describe "the regex /(a|b)+/" do
before do
@regex = Regex.compile("(a|b)+")
end
it "should not match the empty string" do
@regex.should_not be_matched_by("")
end
it "should match an a" do
@regex.should be_matched_by("a")
end
it "should match 'b'" do
@regex.should be_matched_by("b")
end
it "should match 'aaa'" do
@regex.should be_matched_by("aaa")
end
it "should match 'bbb'" do
@regex.should be_matched_by("bbb")
end
it "should match 'ababababbbaaa'" do
@regex.should be_matched_by('ababababbbaaa')
end
it "should not be matched if it contains a different char" do
@regex.should_not be_matched_by("ababbbbaacaab")
end
end
+
+ describe "the regex (a|b)+x" do
+ before do
+ # @regex = Regex.compile("(a|b)+x")
+ end
+
+ it "should match 'ax'"
+
+ it "should match 'bx'"
+
+ it "should match 'ababx'"
+
+ it "should not match 'x'"
+ end
end
end
diff --git a/spec/hopcoft/regex/parser_spec.rb b/spec/hopcoft/regex/parser_spec.rb
index 3f39224..ae0be97 100644
--- a/spec/hopcoft/regex/parser_spec.rb
+++ b/spec/hopcoft/regex/parser_spec.rb
@@ -1,292 +1,309 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Regex
describe Parser do
it "should parse 'a' as a Char" do
Parser.parse("a").should == Char.new('a')
end
it "should parse '(a)' as a Char" do
Parser.parse("(a)").should == Char.new("a")
end
it "should parse 'b' as a Char" do
Parser.parse("b").should == Char.new("b")
end
it "should parse 'A' as a Char" do
Parser.parse("A").should == Char.new("A")
end
it "should parse 'Z' as a Char" do
Parser.parse("Z").should == Char.new("Z")
end
it "should parse '0' as a Char" do
Parser.parse("0").should == Char.new("0")
end
it "should parse '9' as a Char" do
Parser.parse("9").should == Char.new("9")
end
it "should parse 'ab' as two chars" do
result = Parser.parse("ab")
result.should == (Char.new("a") + Char.new("b"))
result.should be_a_kind_of(Concatenation)
end
it "should parse 'bc' as two chars" do
result = Parser.parse("bc")
result.should == (Char.new("b") + Char.new("c"))
result.should be_a_kind_of(Concatenation)
end
it "should parse a '.' as a Dot" do
Parser.parse(".").should == Dot.new
end
it "should parse a '\.' as a char dot" do
Parser.parse('\.').should == Char.new(".")
end
it "should parse '\..' as an escaped char + a dot" do
Parser.parse("\\..").should == (Char.new(".") + Dot.new)
end
it "should parse 'a*' as a kleen star" do
Parser.parse("a*").should == KleenStar.new(Char.new("a"))
end
it "should parse 'b*' as a kleen star" do
Parser.parse("b*").should == KleenStar.new(Char.new("b"))
end
it "should parse '\*' as the star char" do
Parser.parse("\\*").should == Char.new("*")
end
it "should parse 'a\*' as a followed by a char" do
Parser.parse("a\\*").should == (Char.new("a") + Char.new("*"))
end
it "should parse a? as an optional a" do
Parser.parse("a?").should == OptionalSymbol.new(Char.new("a"))
end
it "should parse b? as an optional b" do
Parser.parse("b?").should == OptionalSymbol.new(Char.new("b"))
end
it "should parse an escaped question mark as a char" do
Parser.parse("\\?").should == Char.new("?")
end
it "should parse a plus" do
Parser.parse("a+").should == Plus.new(Char.new("a"))
end
it "should parse 'b+'" do
Parser.parse("b+").should == Plus.new(Char.new("b"))
end
it "should parse an escaped plus" do
Parser.parse("\\+").should == Char.new("+")
end
it "should parse [a-z] as a character class" do
Parser.parse("[a-z]").should == CharacterClass.new("a-z")
end
it "should parse [b-c] as a character class" do
Parser.parse("[b-c]").should == CharacterClass.new("b-c")
end
it "should parse \ as an open bracket char" do
Parser.parse("\\[").should == Char.new("[")
end
it "should parse \] as a closed bracket char" do
Parser.parse("\\]").should == Char.new("]")
end
it "should parse 'ab' as a concatenation of a and b" do
char1 = Char.new("a")
char2 = Char.new("b")
Parser.parse("ab").should == Concatenation.new(char1, char2)
Parser.parse("ab").should be_a_kind_of(Concatenation)
end
it "should parse [a-z]* as a kleen star of a char class" do
Parser.parse("[a-z]*").should == KleenStar.new(CharacterClass.new("a-z"))
end
it "should parse alternation" do
result = Parser.parse("a|b")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("a"), Char.new("b"))
end
it "should parse correct chars in the alternation" do
result = Parser.parse("x|y")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("x"), Char.new("y"))
end
it "should parse '.|a' as an alternation" do
result = Parser.parse(".|a")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, Char.new("a"))
end
it "should allow a char class in the second position" do
result = Parser.parse(".|[a-z]")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, CharacterClass.new("a-z"))
result.expressions.last.should be_a_kind_of(CharacterClass)
end
it "should allow a plus after a char class" do
result = Parser.parse("[a-z]+")
result.should be_a_kind_of(Plus)
result.should == Plus.new(CharacterClass.new("a-z"))
end
it "should see an escaped plus as a char" do
Parser.parse('\+').should be_a_kind_of(Char)
end
it "should see an escaped plus with a argment in front of it as an escaped plus with a concatenation" do
result = Parser.parse('a\+')
result.should == Concatenation.new(Char.new("a"), Char.new("+"))
end
it "should allow an optional char class" do
result = Parser.parse("[a-z]?")
result.should == OptionalSymbol.new(CharacterClass.new("a-z"))
end
it "should parse with parens" do
result = Parser.parse("([a-z])")
result.should be_a_kind_of(CharacterClass)
end
it "should parse an escaped paren inside parens" do
result = Parser.parse('(\()')
result.should == Char.new("(")
end
it "should allow parens around a concatenation" do
result = Parser.parse("(ab)")
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse matching escaped parens inside a set of parens" do
result = Parser.parse '(\(\))'
result.should == (Char.new("(") + Char.new(")"))
end
it "should parse two sets of parens around each other" do
result = Parser.parse "((ab))"
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse a number" do
result = Parser.parse("9")
result.should == Char.new("9")
end
it "should parse any single non-special char (one that isn't in the regex set)" do
result = Parser.parse("$")
result.should == Char.new("$")
end
it "should parse an escaped or" do
result = Parser.parse('\|')
result.should == Char.new("|")
end
it "should parse an underscore" do
result = Parser.parse("_")
result.should == Char.new("_")
end
it "should parse a char class with one element" do
result = Parser.parse("[a]")
result.should == Char.new("a")
end
it "should parse a char class with one element but a different char" do
result = Parser.parse("[b]")
result.should == Char.new("b")
end
it "should parse an escaped special char inside a character class" do
result = Parser.parse('[\+]')
result.should be_a_kind_of(Char)
result.should == Char.new("+")
end
it "should parse two escaped chars within a char range" do
result = Parser.parse '[\a-\b]'
result.should be_a_kind_of(CharacterClass)
result.should == CharacterClass.new("\\a-\\b")
end
it "should NOT parse an empty char class" do
lambda {
Parser.parse("[]")
}.should raise_error(Parser::ParseError)
end
["+", "?", "*", "[", "]", "\\", "|"].each do |char|
it "should not parse the regex '#{char}'" do
lambda {
Parser.parse("#{char}")
}.should raise_error(Parser::ParseError)
end
end
it "should raise an error if it cannot parse a string" do
lambda {
Parser.parse("[")
}.should raise_error(Parser::ParseError, "could not parse the regex '['")
end
it "should use the correct string name" do
lambda {
Parser.parse("]")
}.should raise_error(Parser::ParseError, "could not parse the regex ']'")
end
it "should allow multiple expressions inside a char class (i.e [a-zA-Z])" do
result = Parser.parse("[a-zA-Z]")
result.should be_a_kind_of(CharacterClass)
end
it "should be able to parse multiple ORs (a|b|c)" do
result = Parser.parse("a|b|c")
result.should == Alternation.new(Char.new("a"), Alternation.new(Char.new("b"), Char.new("c")))
end
it "should be able to parse (a|b)+" do
result = Parser.parse("(a|b)+")
result.should be_a_kind_of(Plus)
end
+ it "should be able to parse (a|b+)x" do
+ result = Parser.parse("(a|b+)x")
+ result.should be_a_kind_of(Concatenation)
+ end
+
+ it "should be able to parse (a|b)+x" do
+ pending do
+ result = Parser.parse("(a|b)+x", true)
+ result.should be_a_kind_of(Concatenation)
+ end
+ end
+
+ it "should be able to parse (a)" do
+ result = Parser.parse("(a)")
+ result.should be_a_kind_of(Char)
+ end
+
describe "debugging info" do
it "should have debugging info off by default" do
Parser.new.should_not be_debugging
end
it "should be able to set debugging information" do
p = Parser.new
p.debug = true
p.should be_debugging
end
end
end
end
end
|
smtlaissezfaire/hopcroft
|
5bee6c9ff5a52f0b42287c465df4bc7aed35f304
|
Allow multiple ranges / chars inside a CharClass
|
diff --git a/lib/hopcroft/regex/character_class.rb b/lib/hopcroft/regex/character_class.rb
index bbf62a0..608cb60 100644
--- a/lib/hopcroft/regex/character_class.rb
+++ b/lib/hopcroft/regex/character_class.rb
@@ -1,54 +1,75 @@
module Hopcroft
module Regex
class CharacterClass < Base
class InvalidCharacterClass < StandardError; end
class << self
- def new(str)
- one_char_long?(str) ? Char.new(str) : super
+ def new(*strs)
+ if strs.size == 1 && one_char_long?(strs.first)
+ Char.new(strs.first)
+ else
+ super
+ end
end
private
def one_char_long?(str)
str.size == 1 || (str.size == 2 && str[0] == "\\"[0])
end
end
- def initialize(str)
- super
+ def initialize(*strs)
+ @expressions = strs
raise InvalidCharacterClass if invalid_expression?
end
def build_machine(start_state)
each_symbol do |sym|
start_state.add_transition :symbol => sym, :final => true
end
end
def each_symbol(&block)
symbols.each(&block)
end
def symbols
- start, finish = expression.split("-")
- Range.new(start, finish).to_a.map { |e| e.to_s }
+ @expressions.map { |expr| symbols_for_expr(expr) }.flatten
end
def to_regex_s
- "#{OPEN_BRACKET}#{expression}#{CLOSE_BRACKET}"
+ "#{OPEN_BRACKET}#{expression_regex}#{CLOSE_BRACKET}"
end
private
+
+ def symbols_for_expr(expr)
+ if expr.include?("-")
+ Range.new(*expr.split("-")).to_a.map { |e| e.to_s }
+ else
+ expr
+ end
+ end
+
+ def expression_regex
+ @expressions.join("")
+ end
def valid_expression?
- one, two = expression.split("-")
- two > one
+ @expressions.all? do |expr|
+ if expr.include?("-")
+ one, two = expr.split("-")
+ two > one
+ else
+ true
+ end
+ end
end
def invalid_expression?
!valid_expression?
end
end
end
end
diff --git a/lib/hopcroft/regex/regex_parser.treetop b/lib/hopcroft/regex/regex_parser.treetop
index 1189067..b6ccb7c 100644
--- a/lib/hopcroft/regex/regex_parser.treetop
+++ b/lib/hopcroft/regex/regex_parser.treetop
@@ -1,91 +1,95 @@
module Hopcroft
module Regex
grammar TreetopRegex
# expr -> expr "*"
# -> expr "+"
# -> expr expr # concatenation
# -> "(" expr ")"
# -> /[a-zA-Z]+/
#
# after left factorization:
#
# expr -> "(" expr-prime ")"
# -> /[az-A-Z]/ expr-prime
#
# expr-prime -> ""
# -> "*"
# -> "+"
# -> expr
#
#
# See http://en.wikipedia.org/wiki/Left_recursion#Removing_immediate_left_recursion
rule regex
LEFT_PARENS regex RIGHT_PARENS subexpression <SyntaxNodes::ParenthesizedSubexpression> /
left_factored_expression subexpression <SyntaxNodes::LeftFactoredExpression>
end
rule subexpression
"*" <SyntaxNodes::KleenStar> /
"+" <SyntaxNodes::Plus> /
"?" <SyntaxNodes::OptionalSymbol> /
"|" regex <SyntaxNodes::Alternation> /
regex <SyntaxNodes::Concatenation> /
"" <SyntaxNodes::Epsilon>
end
rule left_factored_expression
dot / character_class / single_char
end
rule character_class
LEFT_BRACKET inner_char_class RIGHT_BRACKET <SyntaxNodes::CharClass>
end
rule inner_char_class
+ inner_char_class_expr+ <SyntaxNodes::MultipleInnerCharClassExpressions>
+ end
+
+ rule inner_char_class_expr
one:single_char "-" two:single_char <SyntaxNodes::TwoCharClass> /
single_char <SyntaxNodes::OneCharCharClass>
end
rule dot
"." <SyntaxNodes::Dot>
end
rule single_char
non_special_char / escaped_char
end
rule non_special_char
!("(" / ")" / "[" / "+" / "?" / "+" / "]" / "|" / "*" / "\\") ANY_CHAR <SyntaxNodes::NonSpecialChar>
end
rule escaped_char
ESCAPE any_char:ANY_CHAR <SyntaxNodes::EscapedChar>
end
rule ANY_CHAR
.
end
rule LEFT_BRACKET
"["
end
rule RIGHT_BRACKET
"]"
end
rule ESCAPE
"\\"
end
rule LEFT_PARENS
"("
end
rule RIGHT_PARENS
")"
end
end
end
end
diff --git a/lib/hopcroft/regex/syntax_nodes.rb b/lib/hopcroft/regex/syntax_nodes.rb
index e33d97f..2e28a20 100644
--- a/lib/hopcroft/regex/syntax_nodes.rb
+++ b/lib/hopcroft/regex/syntax_nodes.rb
@@ -1,136 +1,144 @@
module Hopcroft
module Regex
module SyntaxNodes
class Base < ::Treetop::Runtime::SyntaxNode; end
class MultiExpression < Base
def eval
second_expression.call(first_expression)
end
end
class LeftFactoredExpression < MultiExpression
def first_expression
@first_expression ||= left_factored_expression.eval.call(left_factored_expression)
end
def second_expression
@second_expression ||= subexpression.eval
end
end
class ParenthesizedSubexpression < MultiExpression
def first_expression
@first_expression ||= regex.eval
end
def second_expression
@second_expression ||= subexpression.eval
end
end
module Concatenation
def eval
lambda do |obj|
subexpressions = elements.map { |e| e.eval }.compact
if subexpressions.any?
Regex::Concatenation.new(obj, subexpressions.first.call(self))
else
obj
end
end
end
end
class Plus < Base
def eval
lambda do |obj|
Hopcroft::Regex::Plus.new(obj)
end
end
end
class KleenStar < Base
def eval
lambda do |obj|
Hopcroft::Regex::KleenStar.new(obj)
end
end
end
class OptionalSymbol < Base
def eval
lambda do |obj|
Hopcroft::Regex::OptionalSymbol.new(obj)
end
end
end
class Epsilon < Base
def eval
lambda do |obj|
obj
end
end
end
class Alternation < Base
def eval
lambda do |obj|
Regex::Alternation.new(obj, regex.eval)
end
end
end
class Dot < Base
def eval
lambda do |obj|
Regex::Dot.new
end
end
end
module NonSpecialChar
def eval
lambda do |obj|
Char.new(obj.text_value)
end
end
end
class CharClass < Base
def eval
lambda do
- CharacterClass.new(inner_char_class.eval.call)
+ CharacterClass.new(*inner_char_class.eval.call)
+ end
+ end
+ end
+
+ class MultipleInnerCharClassExpressions < Base
+ def eval
+ lambda do
+ elements.map { |e| e.eval.call }
end
end
end
class TwoCharClass < Base
def eval
lambda do
"#{one.text_value}-#{two.text_value}"
end
end
end
module OneCharCharClass
def eval
lambda do
text_value
end
end
end
class EscapedChar < Base
def eval
lambda do |obj|
Char.new(any_char.text_value)
end
end
end
end
end
end
\ No newline at end of file
diff --git a/spec/hopcoft/regex/charachter_class_spec.rb b/spec/hopcoft/regex/charachter_class_spec.rb
index aa89873..649e84f 100644
--- a/spec/hopcoft/regex/charachter_class_spec.rb
+++ b/spec/hopcoft/regex/charachter_class_spec.rb
@@ -1,77 +1,104 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Regex
describe CharacterClass do
describe "checking for valid expressions" do
it "should not be valid with e-a" do
lambda {
CharacterClass.new("e-a")
}.should raise_error(CharacterClass::InvalidCharacterClass)
end
it "should return a Char if one char long" do
char = CharacterClass.new("a")
char.should be_a_kind_of(Regex::Char)
char.to_regex_s.should == "a"
end
it "should return a char if two chars long, and the first char is an escape" do
char = CharacterClass.new("\\a")
char.should be_a_kind_of(Regex::Char)
end
it "should be valid with a-e" do
klass = CharacterClass.new("a-e")
- klass.expression.should == "a-e"
+ klass.to_regex_s.should == "[a-e]"
end
it "should be invalid if the second char comes before the first in the alphabet" do
lambda {
CharacterClass.new("b-a")
}.should raise_error
end
it "should allow multiple sets of ranges" do
lambda {
CharacterClass.new("a-zA-Z")
}.should_not raise_error
end
it "should have the regex string" do
CharacterClass.new("a-c").to_regex_s.should == "[a-c]"
end
+
+ it "should be valid with multiple ranges" do
+ CharacterClass.new("a-c", "e-f").to_regex_s.should == "[a-ce-f]"
+ end
+
+ it "should allow a range and a single char" do
+ CharacterClass.new("a-c", "d").to_regex_s.should == "[a-cd]"
+ end
end
describe "matching" do
it "should match an a in [a-z]" do
klass = CharacterClass.new("a-z")
klass.matches?("a").should be_true
end
it "should match b in [a-z]" do
klass = CharacterClass.new("a-z")
klass.matches?("b").should be_true
end
it "should match an X in [A-Z]" do
klass = CharacterClass.new("A-Z")
klass.matches?("X").should be_true
end
it "should not match an a in [A-Z]" do
klass = CharacterClass.new("A-Z")
klass.matches?("a").should be_false
end
it "should match a number in [0-9]" do
klass = CharacterClass.new("0-9")
klass.matches?("0").should be_true
end
- it "should match in a multi-range expression [0-9a-eA-E]"
+ it "should match in a multi-range expression [0-9a-eA-E]" do
+ klass = CharacterClass.new("0-9", "a-e", "A-E")
+ klass.should be_matched_by("0")
+ klass.should be_matched_by("1")
+ klass.should be_matched_by("9")
+ klass.should be_matched_by("a")
+ klass.should be_matched_by("e")
+ klass.should be_matched_by("A")
+ klass.should be_matched_by("E")
+ klass.should_not be_matched_by("f")
+ klass.should_not be_matched_by("X")
+ end
+
+ it "should match when given a range and a single char" do
+ klass = CharacterClass.new("0-9", "a")
+ klass.should be_matched_by("0")
+ klass.should be_matched_by("9")
+ klass.should be_matched_by("a")
+ klass.should_not be_matched_by("b")
+ end
end
end
end
end
diff --git a/spec/hopcoft/regex/parser_spec.rb b/spec/hopcoft/regex/parser_spec.rb
index 868b60d..3f39224 100644
--- a/spec/hopcoft/regex/parser_spec.rb
+++ b/spec/hopcoft/regex/parser_spec.rb
@@ -1,289 +1,292 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Regex
describe Parser do
it "should parse 'a' as a Char" do
Parser.parse("a").should == Char.new('a')
end
it "should parse '(a)' as a Char" do
Parser.parse("(a)").should == Char.new("a")
end
it "should parse 'b' as a Char" do
Parser.parse("b").should == Char.new("b")
end
it "should parse 'A' as a Char" do
Parser.parse("A").should == Char.new("A")
end
it "should parse 'Z' as a Char" do
Parser.parse("Z").should == Char.new("Z")
end
it "should parse '0' as a Char" do
Parser.parse("0").should == Char.new("0")
end
it "should parse '9' as a Char" do
Parser.parse("9").should == Char.new("9")
end
it "should parse 'ab' as two chars" do
result = Parser.parse("ab")
result.should == (Char.new("a") + Char.new("b"))
result.should be_a_kind_of(Concatenation)
end
it "should parse 'bc' as two chars" do
result = Parser.parse("bc")
result.should == (Char.new("b") + Char.new("c"))
result.should be_a_kind_of(Concatenation)
end
it "should parse a '.' as a Dot" do
Parser.parse(".").should == Dot.new
end
it "should parse a '\.' as a char dot" do
Parser.parse('\.').should == Char.new(".")
end
it "should parse '\..' as an escaped char + a dot" do
Parser.parse("\\..").should == (Char.new(".") + Dot.new)
end
it "should parse 'a*' as a kleen star" do
Parser.parse("a*").should == KleenStar.new(Char.new("a"))
end
it "should parse 'b*' as a kleen star" do
Parser.parse("b*").should == KleenStar.new(Char.new("b"))
end
it "should parse '\*' as the star char" do
Parser.parse("\\*").should == Char.new("*")
end
it "should parse 'a\*' as a followed by a char" do
Parser.parse("a\\*").should == (Char.new("a") + Char.new("*"))
end
it "should parse a? as an optional a" do
Parser.parse("a?").should == OptionalSymbol.new(Char.new("a"))
end
it "should parse b? as an optional b" do
Parser.parse("b?").should == OptionalSymbol.new(Char.new("b"))
end
it "should parse an escaped question mark as a char" do
Parser.parse("\\?").should == Char.new("?")
end
it "should parse a plus" do
Parser.parse("a+").should == Plus.new(Char.new("a"))
end
it "should parse 'b+'" do
Parser.parse("b+").should == Plus.new(Char.new("b"))
end
it "should parse an escaped plus" do
Parser.parse("\\+").should == Char.new("+")
end
it "should parse [a-z] as a character class" do
Parser.parse("[a-z]").should == CharacterClass.new("a-z")
end
it "should parse [b-c] as a character class" do
Parser.parse("[b-c]").should == CharacterClass.new("b-c")
end
it "should parse \ as an open bracket char" do
Parser.parse("\\[").should == Char.new("[")
end
it "should parse \] as a closed bracket char" do
Parser.parse("\\]").should == Char.new("]")
end
it "should parse 'ab' as a concatenation of a and b" do
char1 = Char.new("a")
char2 = Char.new("b")
Parser.parse("ab").should == Concatenation.new(char1, char2)
Parser.parse("ab").should be_a_kind_of(Concatenation)
end
it "should parse [a-z]* as a kleen star of a char class" do
Parser.parse("[a-z]*").should == KleenStar.new(CharacterClass.new("a-z"))
end
it "should parse alternation" do
result = Parser.parse("a|b")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("a"), Char.new("b"))
end
it "should parse correct chars in the alternation" do
result = Parser.parse("x|y")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("x"), Char.new("y"))
end
it "should parse '.|a' as an alternation" do
result = Parser.parse(".|a")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, Char.new("a"))
end
it "should allow a char class in the second position" do
result = Parser.parse(".|[a-z]")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, CharacterClass.new("a-z"))
result.expressions.last.should be_a_kind_of(CharacterClass)
end
it "should allow a plus after a char class" do
result = Parser.parse("[a-z]+")
result.should be_a_kind_of(Plus)
result.should == Plus.new(CharacterClass.new("a-z"))
end
it "should see an escaped plus as a char" do
Parser.parse('\+').should be_a_kind_of(Char)
end
it "should see an escaped plus with a argment in front of it as an escaped plus with a concatenation" do
result = Parser.parse('a\+')
result.should == Concatenation.new(Char.new("a"), Char.new("+"))
end
it "should allow an optional char class" do
result = Parser.parse("[a-z]?")
result.should == OptionalSymbol.new(CharacterClass.new("a-z"))
end
it "should parse with parens" do
result = Parser.parse("([a-z])")
result.should be_a_kind_of(CharacterClass)
end
it "should parse an escaped paren inside parens" do
result = Parser.parse('(\()')
result.should == Char.new("(")
end
it "should allow parens around a concatenation" do
result = Parser.parse("(ab)")
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse matching escaped parens inside a set of parens" do
result = Parser.parse '(\(\))'
result.should == (Char.new("(") + Char.new(")"))
end
it "should parse two sets of parens around each other" do
result = Parser.parse "((ab))"
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse a number" do
result = Parser.parse("9")
result.should == Char.new("9")
end
it "should parse any single non-special char (one that isn't in the regex set)" do
result = Parser.parse("$")
result.should == Char.new("$")
end
it "should parse an escaped or" do
result = Parser.parse('\|')
result.should == Char.new("|")
end
it "should parse an underscore" do
result = Parser.parse("_")
result.should == Char.new("_")
end
it "should parse a char class with one element" do
result = Parser.parse("[a]")
result.should == Char.new("a")
end
it "should parse a char class with one element but a different char" do
result = Parser.parse("[b]")
result.should == Char.new("b")
end
it "should parse an escaped special char inside a character class" do
result = Parser.parse('[\+]')
result.should be_a_kind_of(Char)
result.should == Char.new("+")
end
it "should parse two escaped chars within a char range" do
result = Parser.parse '[\a-\b]'
result.should be_a_kind_of(CharacterClass)
result.should == CharacterClass.new("\\a-\\b")
end
it "should NOT parse an empty char class" do
lambda {
Parser.parse("[]")
}.should raise_error(Parser::ParseError)
end
["+", "?", "*", "[", "]", "\\", "|"].each do |char|
it "should not parse the regex '#{char}'" do
lambda {
Parser.parse("#{char}")
}.should raise_error(Parser::ParseError)
end
end
it "should raise an error if it cannot parse a string" do
lambda {
Parser.parse("[")
}.should raise_error(Parser::ParseError, "could not parse the regex '['")
end
it "should use the correct string name" do
lambda {
Parser.parse("]")
}.should raise_error(Parser::ParseError, "could not parse the regex ']'")
end
- it "should allow multiple expressions inside a char class (i.e [a-zA-Z])"
+ it "should allow multiple expressions inside a char class (i.e [a-zA-Z])" do
+ result = Parser.parse("[a-zA-Z]")
+ result.should be_a_kind_of(CharacterClass)
+ end
it "should be able to parse multiple ORs (a|b|c)" do
result = Parser.parse("a|b|c")
result.should == Alternation.new(Char.new("a"), Alternation.new(Char.new("b"), Char.new("c")))
end
it "should be able to parse (a|b)+" do
result = Parser.parse("(a|b)+")
result.should be_a_kind_of(Plus)
end
describe "debugging info" do
it "should have debugging info off by default" do
Parser.new.should_not be_debugging
end
it "should be able to set debugging information" do
p = Parser.new
p.debug = true
p.should be_debugging
end
end
end
end
end
|
smtlaissezfaire/hopcroft
|
6687657af2fa569b56f45c1dc061003a610e1086
|
Fill in a pending spec
|
diff --git a/spec/hopcoft/regex/parser_spec.rb b/spec/hopcoft/regex/parser_spec.rb
index 41bba8d..868b60d 100644
--- a/spec/hopcoft/regex/parser_spec.rb
+++ b/spec/hopcoft/regex/parser_spec.rb
@@ -1,286 +1,289 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Regex
describe Parser do
it "should parse 'a' as a Char" do
Parser.parse("a").should == Char.new('a')
end
it "should parse '(a)' as a Char" do
Parser.parse("(a)").should == Char.new("a")
end
it "should parse 'b' as a Char" do
Parser.parse("b").should == Char.new("b")
end
it "should parse 'A' as a Char" do
Parser.parse("A").should == Char.new("A")
end
it "should parse 'Z' as a Char" do
Parser.parse("Z").should == Char.new("Z")
end
it "should parse '0' as a Char" do
Parser.parse("0").should == Char.new("0")
end
it "should parse '9' as a Char" do
Parser.parse("9").should == Char.new("9")
end
it "should parse 'ab' as two chars" do
result = Parser.parse("ab")
result.should == (Char.new("a") + Char.new("b"))
result.should be_a_kind_of(Concatenation)
end
it "should parse 'bc' as two chars" do
result = Parser.parse("bc")
result.should == (Char.new("b") + Char.new("c"))
result.should be_a_kind_of(Concatenation)
end
it "should parse a '.' as a Dot" do
Parser.parse(".").should == Dot.new
end
it "should parse a '\.' as a char dot" do
Parser.parse('\.').should == Char.new(".")
end
it "should parse '\..' as an escaped char + a dot" do
Parser.parse("\\..").should == (Char.new(".") + Dot.new)
end
it "should parse 'a*' as a kleen star" do
Parser.parse("a*").should == KleenStar.new(Char.new("a"))
end
it "should parse 'b*' as a kleen star" do
Parser.parse("b*").should == KleenStar.new(Char.new("b"))
end
it "should parse '\*' as the star char" do
Parser.parse("\\*").should == Char.new("*")
end
it "should parse 'a\*' as a followed by a char" do
Parser.parse("a\\*").should == (Char.new("a") + Char.new("*"))
end
it "should parse a? as an optional a" do
Parser.parse("a?").should == OptionalSymbol.new(Char.new("a"))
end
it "should parse b? as an optional b" do
Parser.parse("b?").should == OptionalSymbol.new(Char.new("b"))
end
it "should parse an escaped question mark as a char" do
Parser.parse("\\?").should == Char.new("?")
end
it "should parse a plus" do
Parser.parse("a+").should == Plus.new(Char.new("a"))
end
it "should parse 'b+'" do
Parser.parse("b+").should == Plus.new(Char.new("b"))
end
it "should parse an escaped plus" do
Parser.parse("\\+").should == Char.new("+")
end
it "should parse [a-z] as a character class" do
Parser.parse("[a-z]").should == CharacterClass.new("a-z")
end
it "should parse [b-c] as a character class" do
Parser.parse("[b-c]").should == CharacterClass.new("b-c")
end
it "should parse \ as an open bracket char" do
Parser.parse("\\[").should == Char.new("[")
end
it "should parse \] as a closed bracket char" do
Parser.parse("\\]").should == Char.new("]")
end
it "should parse 'ab' as a concatenation of a and b" do
char1 = Char.new("a")
char2 = Char.new("b")
Parser.parse("ab").should == Concatenation.new(char1, char2)
Parser.parse("ab").should be_a_kind_of(Concatenation)
end
it "should parse [a-z]* as a kleen star of a char class" do
Parser.parse("[a-z]*").should == KleenStar.new(CharacterClass.new("a-z"))
end
it "should parse alternation" do
result = Parser.parse("a|b")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("a"), Char.new("b"))
end
it "should parse correct chars in the alternation" do
result = Parser.parse("x|y")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("x"), Char.new("y"))
end
it "should parse '.|a' as an alternation" do
result = Parser.parse(".|a")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, Char.new("a"))
end
it "should allow a char class in the second position" do
result = Parser.parse(".|[a-z]")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, CharacterClass.new("a-z"))
result.expressions.last.should be_a_kind_of(CharacterClass)
end
it "should allow a plus after a char class" do
result = Parser.parse("[a-z]+")
result.should be_a_kind_of(Plus)
result.should == Plus.new(CharacterClass.new("a-z"))
end
it "should see an escaped plus as a char" do
Parser.parse('\+').should be_a_kind_of(Char)
end
it "should see an escaped plus with a argment in front of it as an escaped plus with a concatenation" do
result = Parser.parse('a\+')
result.should == Concatenation.new(Char.new("a"), Char.new("+"))
end
it "should allow an optional char class" do
result = Parser.parse("[a-z]?")
result.should == OptionalSymbol.new(CharacterClass.new("a-z"))
end
it "should parse with parens" do
result = Parser.parse("([a-z])")
result.should be_a_kind_of(CharacterClass)
end
it "should parse an escaped paren inside parens" do
result = Parser.parse('(\()')
result.should == Char.new("(")
end
it "should allow parens around a concatenation" do
result = Parser.parse("(ab)")
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse matching escaped parens inside a set of parens" do
result = Parser.parse '(\(\))'
result.should == (Char.new("(") + Char.new(")"))
end
it "should parse two sets of parens around each other" do
result = Parser.parse "((ab))"
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse a number" do
result = Parser.parse("9")
result.should == Char.new("9")
end
it "should parse any single non-special char (one that isn't in the regex set)" do
result = Parser.parse("$")
result.should == Char.new("$")
end
it "should parse an escaped or" do
result = Parser.parse('\|')
result.should == Char.new("|")
end
it "should parse an underscore" do
result = Parser.parse("_")
result.should == Char.new("_")
end
it "should parse a char class with one element" do
result = Parser.parse("[a]")
result.should == Char.new("a")
end
it "should parse a char class with one element but a different char" do
result = Parser.parse("[b]")
result.should == Char.new("b")
end
it "should parse an escaped special char inside a character class" do
result = Parser.parse('[\+]')
result.should be_a_kind_of(Char)
result.should == Char.new("+")
end
it "should parse two escaped chars within a char range" do
result = Parser.parse '[\a-\b]'
result.should be_a_kind_of(CharacterClass)
result.should == CharacterClass.new("\\a-\\b")
end
it "should NOT parse an empty char class" do
lambda {
Parser.parse("[]")
}.should raise_error(Parser::ParseError)
end
["+", "?", "*", "[", "]", "\\", "|"].each do |char|
it "should not parse the regex '#{char}'" do
lambda {
Parser.parse("#{char}")
}.should raise_error(Parser::ParseError)
end
end
it "should raise an error if it cannot parse a string" do
lambda {
Parser.parse("[")
}.should raise_error(Parser::ParseError, "could not parse the regex '['")
end
it "should use the correct string name" do
lambda {
Parser.parse("]")
}.should raise_error(Parser::ParseError, "could not parse the regex ']'")
end
it "should allow multiple expressions inside a char class (i.e [a-zA-Z])"
- it "should be able to parse multiple ORs (a|b|c)"
+ it "should be able to parse multiple ORs (a|b|c)" do
+ result = Parser.parse("a|b|c")
+ result.should == Alternation.new(Char.new("a"), Alternation.new(Char.new("b"), Char.new("c")))
+ end
it "should be able to parse (a|b)+" do
result = Parser.parse("(a|b)+")
result.should be_a_kind_of(Plus)
end
describe "debugging info" do
it "should have debugging info off by default" do
Parser.new.should_not be_debugging
end
it "should be able to set debugging information" do
p = Parser.new
p.debug = true
p.should be_debugging
end
end
end
end
end
|
smtlaissezfaire/hopcroft
|
1c61aa26afbfb9ec80241fbfeb7a89544e051f4b
|
Fill in pending specs
|
diff --git a/spec/hopcoft/integration_spec.rb b/spec/hopcoft/integration_spec.rb
index 986f24d..d430299 100644
--- a/spec/hopcoft/integration_spec.rb
+++ b/spec/hopcoft/integration_spec.rb
@@ -1,126 +1,151 @@
require File.expand_path(File.dirname(__FILE__) + "/../spec_helper")
module Hopcroft
describe "Integration tests" do
describe "the regex /a/" do
before do
@regex = Regex.compile("a")
end
it "should match 'a'" do
@regex.should be_matched_by("a")
end
it "should not match 'b'" do
@regex.should_not be_matched_by("b")
end
it "should not match 'abasdfasdf'" do
@regex.should_not be_matched_by('abasdfasdf')
end
end
describe "the regex /ab/" do
before do
@regex = Regex.compile("ab")
end
it "should match 'ab'" do
@regex.should be_matched_by("ab")
end
it "should not match 'x'" do
@regex.should_not be_matched_by("x")
end
it "should not match 'ba'" do
@regex.should_not be_matched_by("ba")
end
end
describe "the regex /a*/" do
before do
@regex = Regex.compile("a*")
end
it "should be matched by 'a'" do
@regex.should be_matched_by("a")
end
it "should be matched by the empty string" do
@regex.should be_matched_by("")
end
it "should be matched by 'aa'" do
@regex.should be_matched_by("aa")
end
it "should be matched by 'aaa'" do
@regex.should be_matched_by("aaa")
end
it "should not be matched by 'aab'" do
@regex.should_not be_matched_by("aab")
end
end
describe "the regex /a+/" do
before do
@regex = Regex.compile("a+")
end
it "should be matched by 'a'" do
@regex.should be_matched_by("a")
end
it "should NOT be matched by the empty string" do
@regex.should_not be_matched_by("")
end
it "should be matched by 'aa'" do
@regex.should be_matched_by("aa")
end
it "should not be matched by 'aab'" do
@regex.should_not be_matched_by("aab")
end
it "should be matched by 'aaa'" do
@regex.matches?("aaa")
@regex.should be_matched_by("aaa")
end
end
describe "the regex /a|b/" do
before do
@regex = Regex.compile("a|b")
end
it "should be matched by an 'a'" do
@regex.should be_matched_by("a")
end
it "should be matched by a 'b'" do
@regex.should be_matched_by("b")
end
it "should not be matched by a 'c'" do
@regex.should_not be_matched_by("c")
end
it "should not be matched with the string 'ab'" do
@regex.matched_by?("ab")
@regex.should_not be_matched_by("ab")
end
end
describe "the regex /(a|b)+/" do
- it "should not match the empty string"
- it "should match an a"
- it "should match 'b'"
- it "should match 'aaa'"
- it "should match 'bbb'"
- it "should match 'ababababbbaaa'"
+ before do
+ @regex = Regex.compile("(a|b)+")
+ end
+
+ it "should not match the empty string" do
+ @regex.should_not be_matched_by("")
+ end
+
+ it "should match an a" do
+ @regex.should be_matched_by("a")
+ end
+
+ it "should match 'b'" do
+ @regex.should be_matched_by("b")
+ end
+
+ it "should match 'aaa'" do
+ @regex.should be_matched_by("aaa")
+ end
+
+ it "should match 'bbb'" do
+ @regex.should be_matched_by("bbb")
+ end
+
+ it "should match 'ababababbbaaa'" do
+ @regex.should be_matched_by('ababababbbaaa')
+ end
+
+ it "should not be matched if it contains a different char" do
+ @regex.should_not be_matched_by("ababbbbaacaab")
+ end
end
end
end
|
smtlaissezfaire/hopcroft
|
2f5721954fa8a9230b7452c6146a0e710d30b74d
|
More consistent handling of Epsilon states (empty strings) in the parser
|
diff --git a/lib/hopcroft/regex/syntax_nodes.rb b/lib/hopcroft/regex/syntax_nodes.rb
index 47d797f..e33d97f 100644
--- a/lib/hopcroft/regex/syntax_nodes.rb
+++ b/lib/hopcroft/regex/syntax_nodes.rb
@@ -1,136 +1,136 @@
module Hopcroft
module Regex
module SyntaxNodes
class Base < ::Treetop::Runtime::SyntaxNode; end
class MultiExpression < Base
def eval
- second_expression.respond_to?(:call) ?
- second_expression.call(first_expression) :
- first_expression
+ second_expression.call(first_expression)
end
end
class LeftFactoredExpression < MultiExpression
def first_expression
@first_expression ||= left_factored_expression.eval.call(left_factored_expression)
end
def second_expression
@second_expression ||= subexpression.eval
end
end
class ParenthesizedSubexpression < MultiExpression
def first_expression
@first_expression ||= regex.eval
end
def second_expression
@second_expression ||= subexpression.eval
end
end
module Concatenation
def eval
lambda do |obj|
subexpressions = elements.map { |e| e.eval }.compact
if subexpressions.any?
Regex::Concatenation.new(obj, subexpressions.first.call(self))
else
obj
end
end
end
end
class Plus < Base
def eval
lambda do |obj|
Hopcroft::Regex::Plus.new(obj)
end
end
end
class KleenStar < Base
def eval
lambda do |obj|
Hopcroft::Regex::KleenStar.new(obj)
end
end
end
class OptionalSymbol < Base
def eval
lambda do |obj|
Hopcroft::Regex::OptionalSymbol.new(obj)
end
end
end
class Epsilon < Base
def eval
- # nil
+ lambda do |obj|
+ obj
+ end
end
end
class Alternation < Base
def eval
lambda do |obj|
Regex::Alternation.new(obj, regex.eval)
end
end
end
class Dot < Base
def eval
lambda do |obj|
Regex::Dot.new
end
end
end
module NonSpecialChar
def eval
lambda do |obj|
Char.new(obj.text_value)
end
end
end
class CharClass < Base
def eval
lambda do
CharacterClass.new(inner_char_class.eval.call)
end
end
end
class TwoCharClass < Base
def eval
lambda do
"#{one.text_value}-#{two.text_value}"
end
end
end
module OneCharCharClass
def eval
lambda do
text_value
end
end
end
class EscapedChar < Base
def eval
lambda do |obj|
Char.new(any_char.text_value)
end
end
end
end
end
end
\ No newline at end of file
|
smtlaissezfaire/hopcroft
|
3b60eefe5bc15221238b925e36946b07e1c4487d
|
Treat concatenation more consistently in the parser
|
diff --git a/lib/hopcroft/regex/regex_parser.treetop b/lib/hopcroft/regex/regex_parser.treetop
index 881c8fc..1189067 100644
--- a/lib/hopcroft/regex/regex_parser.treetop
+++ b/lib/hopcroft/regex/regex_parser.treetop
@@ -1,91 +1,91 @@
module Hopcroft
module Regex
grammar TreetopRegex
# expr -> expr "*"
# -> expr "+"
# -> expr expr # concatenation
# -> "(" expr ")"
# -> /[a-zA-Z]+/
#
# after left factorization:
#
# expr -> "(" expr-prime ")"
# -> /[az-A-Z]/ expr-prime
#
# expr-prime -> ""
# -> "*"
# -> "+"
# -> expr
#
#
# See http://en.wikipedia.org/wiki/Left_recursion#Removing_immediate_left_recursion
rule regex
LEFT_PARENS regex RIGHT_PARENS subexpression <SyntaxNodes::ParenthesizedSubexpression> /
left_factored_expression subexpression <SyntaxNodes::LeftFactoredExpression>
end
rule subexpression
"*" <SyntaxNodes::KleenStar> /
"+" <SyntaxNodes::Plus> /
"?" <SyntaxNodes::OptionalSymbol> /
"|" regex <SyntaxNodes::Alternation> /
- regex /
+ regex <SyntaxNodes::Concatenation> /
"" <SyntaxNodes::Epsilon>
end
rule left_factored_expression
dot / character_class / single_char
end
rule character_class
LEFT_BRACKET inner_char_class RIGHT_BRACKET <SyntaxNodes::CharClass>
end
rule inner_char_class
one:single_char "-" two:single_char <SyntaxNodes::TwoCharClass> /
single_char <SyntaxNodes::OneCharCharClass>
end
rule dot
"." <SyntaxNodes::Dot>
end
rule single_char
non_special_char / escaped_char
end
rule non_special_char
!("(" / ")" / "[" / "+" / "?" / "+" / "]" / "|" / "*" / "\\") ANY_CHAR <SyntaxNodes::NonSpecialChar>
end
rule escaped_char
ESCAPE any_char:ANY_CHAR <SyntaxNodes::EscapedChar>
end
rule ANY_CHAR
.
end
rule LEFT_BRACKET
"["
end
rule RIGHT_BRACKET
"]"
end
rule ESCAPE
"\\"
end
rule LEFT_PARENS
"("
end
rule RIGHT_PARENS
")"
end
end
end
end
diff --git a/lib/hopcroft/regex/syntax_nodes.rb b/lib/hopcroft/regex/syntax_nodes.rb
index 6bd94a7..47d797f 100644
--- a/lib/hopcroft/regex/syntax_nodes.rb
+++ b/lib/hopcroft/regex/syntax_nodes.rb
@@ -1,126 +1,136 @@
module Hopcroft
module Regex
module SyntaxNodes
class Base < ::Treetop::Runtime::SyntaxNode; end
class MultiExpression < Base
def eval
- if second_expression.respond_to?(:call)
- second_expression.call(first_expression)
- elsif second_expression
- first_expression + second_expression
- else
+ second_expression.respond_to?(:call) ?
+ second_expression.call(first_expression) :
first_expression
- end
end
end
class LeftFactoredExpression < MultiExpression
def first_expression
@first_expression ||= left_factored_expression.eval.call(left_factored_expression)
end
def second_expression
@second_expression ||= subexpression.eval
end
end
class ParenthesizedSubexpression < MultiExpression
def first_expression
@first_expression ||= regex.eval
end
def second_expression
@second_expression ||= subexpression.eval
end
end
+ module Concatenation
+ def eval
+ lambda do |obj|
+ subexpressions = elements.map { |e| e.eval }.compact
+
+ if subexpressions.any?
+ Regex::Concatenation.new(obj, subexpressions.first.call(self))
+ else
+ obj
+ end
+ end
+ end
+ end
+
class Plus < Base
def eval
lambda do |obj|
Hopcroft::Regex::Plus.new(obj)
end
end
end
class KleenStar < Base
def eval
lambda do |obj|
Hopcroft::Regex::KleenStar.new(obj)
end
end
end
class OptionalSymbol < Base
def eval
lambda do |obj|
Hopcroft::Regex::OptionalSymbol.new(obj)
end
end
end
class Epsilon < Base
def eval
# nil
end
end
class Alternation < Base
def eval
lambda do |obj|
Regex::Alternation.new(obj, regex.eval)
end
end
end
class Dot < Base
def eval
lambda do |obj|
Regex::Dot.new
end
end
end
module NonSpecialChar
def eval
lambda do |obj|
Char.new(obj.text_value)
end
end
end
class CharClass < Base
def eval
lambda do
CharacterClass.new(inner_char_class.eval.call)
end
end
end
class TwoCharClass < Base
def eval
lambda do
"#{one.text_value}-#{two.text_value}"
end
end
end
module OneCharCharClass
def eval
lambda do
text_value
end
end
end
class EscapedChar < Base
def eval
lambda do |obj|
Char.new(any_char.text_value)
end
end
end
end
end
end
\ No newline at end of file
|
smtlaissezfaire/hopcroft
|
04f3b2565488ae8035620c3857c9e9aee79cabc0
|
Refactor to use common code
|
diff --git a/lib/hopcroft/regex/syntax_nodes.rb b/lib/hopcroft/regex/syntax_nodes.rb
index 7910161..6bd94a7 100644
--- a/lib/hopcroft/regex/syntax_nodes.rb
+++ b/lib/hopcroft/regex/syntax_nodes.rb
@@ -1,124 +1,126 @@
module Hopcroft
module Regex
module SyntaxNodes
class Base < ::Treetop::Runtime::SyntaxNode; end
- class LeftFactoredExpression < Base
+ class MultiExpression < Base
def eval
- char = left_factored_expression.eval.call(left_factored_expression)
- subexpr = subexpression.eval
-
- if subexpr.respond_to?(:call)
- subexpr.call(char)
- elsif subexpr
- char + subexpr
+ if second_expression.respond_to?(:call)
+ second_expression.call(first_expression)
+ elsif second_expression
+ first_expression + second_expression
else
- char
+ first_expression
end
end
end
- class ParenthesizedSubexpression < Base
- def eval
- char = regex.eval
- subexpr = subexpression.eval
-
- if subexpr.respond_to?(:call)
- subexpr.call(char)
- elsif subexpr
- char + subexpr
- else
- char
- end
+ class LeftFactoredExpression < MultiExpression
+ def first_expression
+ @first_expression ||= left_factored_expression.eval.call(left_factored_expression)
+ end
+
+ def second_expression
+ @second_expression ||= subexpression.eval
+ end
+ end
+
+ class ParenthesizedSubexpression < MultiExpression
+ def first_expression
+ @first_expression ||= regex.eval
+ end
+
+ def second_expression
+ @second_expression ||= subexpression.eval
end
end
class Plus < Base
def eval
lambda do |obj|
Hopcroft::Regex::Plus.new(obj)
end
end
end
class KleenStar < Base
def eval
lambda do |obj|
Hopcroft::Regex::KleenStar.new(obj)
end
end
end
class OptionalSymbol < Base
def eval
lambda do |obj|
Hopcroft::Regex::OptionalSymbol.new(obj)
end
end
end
class Epsilon < Base
def eval
# nil
end
end
class Alternation < Base
def eval
lambda do |obj|
Regex::Alternation.new(obj, regex.eval)
end
end
end
class Dot < Base
def eval
lambda do |obj|
Regex::Dot.new
end
end
end
module NonSpecialChar
def eval
lambda do |obj|
Char.new(obj.text_value)
end
end
end
class CharClass < Base
def eval
lambda do
CharacterClass.new(inner_char_class.eval.call)
end
end
end
class TwoCharClass < Base
def eval
lambda do
"#{one.text_value}-#{two.text_value}"
end
end
end
module OneCharCharClass
def eval
lambda do
text_value
end
end
end
class EscapedChar < Base
def eval
lambda do |obj|
Char.new(any_char.text_value)
end
end
end
end
end
end
\ No newline at end of file
|
smtlaissezfaire/hopcroft
|
3f6378d4fb8d8a06a68d86bb3dfe79ad9086c5f2
|
Rename leading_expression => LeftFactoredExpression
|
diff --git a/lib/hopcroft/regex/regex_parser.treetop b/lib/hopcroft/regex/regex_parser.treetop
index 0968523..881c8fc 100644
--- a/lib/hopcroft/regex/regex_parser.treetop
+++ b/lib/hopcroft/regex/regex_parser.treetop
@@ -1,91 +1,91 @@
module Hopcroft
module Regex
grammar TreetopRegex
# expr -> expr "*"
# -> expr "+"
# -> expr expr # concatenation
# -> "(" expr ")"
# -> /[a-zA-Z]+/
#
# after left factorization:
#
# expr -> "(" expr-prime ")"
# -> /[az-A-Z]/ expr-prime
#
# expr-prime -> ""
# -> "*"
# -> "+"
# -> expr
#
#
# See http://en.wikipedia.org/wiki/Left_recursion#Removing_immediate_left_recursion
rule regex
LEFT_PARENS regex RIGHT_PARENS subexpression <SyntaxNodes::ParenthesizedSubexpression> /
- leading_expression subexpression <SyntaxNodes::Subexpression>
+ left_factored_expression subexpression <SyntaxNodes::LeftFactoredExpression>
end
rule subexpression
"*" <SyntaxNodes::KleenStar> /
"+" <SyntaxNodes::Plus> /
"?" <SyntaxNodes::OptionalSymbol> /
"|" regex <SyntaxNodes::Alternation> /
regex /
"" <SyntaxNodes::Epsilon>
end
- rule leading_expression
+ rule left_factored_expression
dot / character_class / single_char
end
rule character_class
LEFT_BRACKET inner_char_class RIGHT_BRACKET <SyntaxNodes::CharClass>
end
rule inner_char_class
one:single_char "-" two:single_char <SyntaxNodes::TwoCharClass> /
single_char <SyntaxNodes::OneCharCharClass>
end
rule dot
"." <SyntaxNodes::Dot>
end
rule single_char
non_special_char / escaped_char
end
rule non_special_char
!("(" / ")" / "[" / "+" / "?" / "+" / "]" / "|" / "*" / "\\") ANY_CHAR <SyntaxNodes::NonSpecialChar>
end
rule escaped_char
ESCAPE any_char:ANY_CHAR <SyntaxNodes::EscapedChar>
end
rule ANY_CHAR
.
end
rule LEFT_BRACKET
"["
end
rule RIGHT_BRACKET
"]"
end
rule ESCAPE
"\\"
end
rule LEFT_PARENS
"("
end
rule RIGHT_PARENS
")"
end
end
end
end
diff --git a/lib/hopcroft/regex/syntax_nodes.rb b/lib/hopcroft/regex/syntax_nodes.rb
index 2f48ee9..7910161 100644
--- a/lib/hopcroft/regex/syntax_nodes.rb
+++ b/lib/hopcroft/regex/syntax_nodes.rb
@@ -1,124 +1,124 @@
module Hopcroft
module Regex
module SyntaxNodes
class Base < ::Treetop::Runtime::SyntaxNode; end
- class Subexpression < Base
+ class LeftFactoredExpression < Base
def eval
- char = leading_expression.eval.call(leading_expression)
+ char = left_factored_expression.eval.call(left_factored_expression)
subexpr = subexpression.eval
if subexpr.respond_to?(:call)
subexpr.call(char)
elsif subexpr
char + subexpr
else
char
end
end
end
class ParenthesizedSubexpression < Base
def eval
char = regex.eval
subexpr = subexpression.eval
if subexpr.respond_to?(:call)
subexpr.call(char)
elsif subexpr
char + subexpr
else
char
end
end
end
class Plus < Base
def eval
lambda do |obj|
Hopcroft::Regex::Plus.new(obj)
end
end
end
class KleenStar < Base
def eval
lambda do |obj|
Hopcroft::Regex::KleenStar.new(obj)
end
end
end
class OptionalSymbol < Base
def eval
lambda do |obj|
Hopcroft::Regex::OptionalSymbol.new(obj)
end
end
end
class Epsilon < Base
def eval
# nil
end
end
class Alternation < Base
def eval
lambda do |obj|
Regex::Alternation.new(obj, regex.eval)
end
end
end
class Dot < Base
def eval
lambda do |obj|
Regex::Dot.new
end
end
end
module NonSpecialChar
def eval
lambda do |obj|
Char.new(obj.text_value)
end
end
end
class CharClass < Base
def eval
lambda do
CharacterClass.new(inner_char_class.eval.call)
end
end
end
class TwoCharClass < Base
def eval
lambda do
"#{one.text_value}-#{two.text_value}"
end
end
end
module OneCharCharClass
def eval
lambda do
text_value
end
end
end
class EscapedChar < Base
def eval
lambda do |obj|
Char.new(any_char.text_value)
end
end
end
end
end
end
\ No newline at end of file
|
smtlaissezfaire/hopcroft
|
9d139f7a530745f33cfb697960b63efd818fbd38
|
Parse /(a|b)+/ properly
|
diff --git a/lib/hopcroft/regex/regex_parser.treetop b/lib/hopcroft/regex/regex_parser.treetop
index ed2b2be..0968523 100644
--- a/lib/hopcroft/regex/regex_parser.treetop
+++ b/lib/hopcroft/regex/regex_parser.treetop
@@ -1,91 +1,91 @@
module Hopcroft
module Regex
grammar TreetopRegex
# expr -> expr "*"
# -> expr "+"
# -> expr expr # concatenation
# -> "(" expr ")"
# -> /[a-zA-Z]+/
#
# after left factorization:
#
# expr -> "(" expr-prime ")"
# -> /[az-A-Z]/ expr-prime
#
# expr-prime -> ""
# -> "*"
# -> "+"
# -> expr
#
#
# See http://en.wikipedia.org/wiki/Left_recursion#Removing_immediate_left_recursion
rule regex
- LEFT_PARENS regex RIGHT_PARENS <SyntaxNodes::ParenthesizedExpression> /
- leading_expression subexpression <SyntaxNodes::Subexpression>
+ LEFT_PARENS regex RIGHT_PARENS subexpression <SyntaxNodes::ParenthesizedSubexpression> /
+ leading_expression subexpression <SyntaxNodes::Subexpression>
end
rule subexpression
"*" <SyntaxNodes::KleenStar> /
"+" <SyntaxNodes::Plus> /
"?" <SyntaxNodes::OptionalSymbol> /
"|" regex <SyntaxNodes::Alternation> /
regex /
"" <SyntaxNodes::Epsilon>
end
rule leading_expression
dot / character_class / single_char
end
rule character_class
LEFT_BRACKET inner_char_class RIGHT_BRACKET <SyntaxNodes::CharClass>
end
rule inner_char_class
one:single_char "-" two:single_char <SyntaxNodes::TwoCharClass> /
single_char <SyntaxNodes::OneCharCharClass>
end
rule dot
"." <SyntaxNodes::Dot>
end
rule single_char
non_special_char / escaped_char
end
rule non_special_char
!("(" / ")" / "[" / "+" / "?" / "+" / "]" / "|" / "*" / "\\") ANY_CHAR <SyntaxNodes::NonSpecialChar>
end
rule escaped_char
ESCAPE any_char:ANY_CHAR <SyntaxNodes::EscapedChar>
end
rule ANY_CHAR
.
end
rule LEFT_BRACKET
"["
end
rule RIGHT_BRACKET
"]"
end
rule ESCAPE
"\\"
end
rule LEFT_PARENS
"("
end
rule RIGHT_PARENS
")"
end
end
end
end
diff --git a/lib/hopcroft/regex/syntax_nodes.rb b/lib/hopcroft/regex/syntax_nodes.rb
index 1b4979d..2f48ee9 100644
--- a/lib/hopcroft/regex/syntax_nodes.rb
+++ b/lib/hopcroft/regex/syntax_nodes.rb
@@ -1,116 +1,124 @@
module Hopcroft
module Regex
module SyntaxNodes
class Base < ::Treetop::Runtime::SyntaxNode; end
class Subexpression < Base
def eval
char = leading_expression.eval.call(leading_expression)
-
subexpr = subexpression.eval
if subexpr.respond_to?(:call)
subexpr.call(char)
elsif subexpr
char + subexpr
else
char
end
end
end
- class ParenthesizedExpression < Base
+ class ParenthesizedSubexpression < Base
def eval
- regex.eval
+ char = regex.eval
+ subexpr = subexpression.eval
+
+ if subexpr.respond_to?(:call)
+ subexpr.call(char)
+ elsif subexpr
+ char + subexpr
+ else
+ char
+ end
end
end
class Plus < Base
def eval
lambda do |obj|
Hopcroft::Regex::Plus.new(obj)
end
end
end
class KleenStar < Base
def eval
lambda do |obj|
Hopcroft::Regex::KleenStar.new(obj)
end
end
end
class OptionalSymbol < Base
def eval
lambda do |obj|
Hopcroft::Regex::OptionalSymbol.new(obj)
end
end
end
class Epsilon < Base
def eval
# nil
end
end
class Alternation < Base
def eval
lambda do |obj|
Regex::Alternation.new(obj, regex.eval)
end
end
end
class Dot < Base
def eval
lambda do |obj|
Regex::Dot.new
end
end
end
module NonSpecialChar
def eval
lambda do |obj|
Char.new(obj.text_value)
end
end
end
class CharClass < Base
def eval
lambda do
CharacterClass.new(inner_char_class.eval.call)
end
end
end
class TwoCharClass < Base
def eval
lambda do
"#{one.text_value}-#{two.text_value}"
end
end
end
module OneCharCharClass
def eval
lambda do
text_value
end
end
end
class EscapedChar < Base
def eval
lambda do |obj|
Char.new(any_char.text_value)
end
end
end
end
end
end
\ No newline at end of file
diff --git a/spec/hopcoft/regex/parser_spec.rb b/spec/hopcoft/regex/parser_spec.rb
index a2a31b6..41bba8d 100644
--- a/spec/hopcoft/regex/parser_spec.rb
+++ b/spec/hopcoft/regex/parser_spec.rb
@@ -1,288 +1,286 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Regex
describe Parser do
it "should parse 'a' as a Char" do
Parser.parse("a").should == Char.new('a')
end
it "should parse '(a)' as a Char" do
Parser.parse("(a)").should == Char.new("a")
end
it "should parse 'b' as a Char" do
Parser.parse("b").should == Char.new("b")
end
it "should parse 'A' as a Char" do
Parser.parse("A").should == Char.new("A")
end
it "should parse 'Z' as a Char" do
Parser.parse("Z").should == Char.new("Z")
end
it "should parse '0' as a Char" do
Parser.parse("0").should == Char.new("0")
end
it "should parse '9' as a Char" do
Parser.parse("9").should == Char.new("9")
end
it "should parse 'ab' as two chars" do
result = Parser.parse("ab")
result.should == (Char.new("a") + Char.new("b"))
result.should be_a_kind_of(Concatenation)
end
it "should parse 'bc' as two chars" do
result = Parser.parse("bc")
result.should == (Char.new("b") + Char.new("c"))
result.should be_a_kind_of(Concatenation)
end
it "should parse a '.' as a Dot" do
Parser.parse(".").should == Dot.new
end
it "should parse a '\.' as a char dot" do
Parser.parse('\.').should == Char.new(".")
end
it "should parse '\..' as an escaped char + a dot" do
Parser.parse("\\..").should == (Char.new(".") + Dot.new)
end
it "should parse 'a*' as a kleen star" do
Parser.parse("a*").should == KleenStar.new(Char.new("a"))
end
it "should parse 'b*' as a kleen star" do
Parser.parse("b*").should == KleenStar.new(Char.new("b"))
end
it "should parse '\*' as the star char" do
Parser.parse("\\*").should == Char.new("*")
end
it "should parse 'a\*' as a followed by a char" do
Parser.parse("a\\*").should == (Char.new("a") + Char.new("*"))
end
it "should parse a? as an optional a" do
Parser.parse("a?").should == OptionalSymbol.new(Char.new("a"))
end
it "should parse b? as an optional b" do
Parser.parse("b?").should == OptionalSymbol.new(Char.new("b"))
end
it "should parse an escaped question mark as a char" do
Parser.parse("\\?").should == Char.new("?")
end
it "should parse a plus" do
Parser.parse("a+").should == Plus.new(Char.new("a"))
end
it "should parse 'b+'" do
Parser.parse("b+").should == Plus.new(Char.new("b"))
end
it "should parse an escaped plus" do
Parser.parse("\\+").should == Char.new("+")
end
it "should parse [a-z] as a character class" do
Parser.parse("[a-z]").should == CharacterClass.new("a-z")
end
it "should parse [b-c] as a character class" do
Parser.parse("[b-c]").should == CharacterClass.new("b-c")
end
it "should parse \ as an open bracket char" do
Parser.parse("\\[").should == Char.new("[")
end
it "should parse \] as a closed bracket char" do
Parser.parse("\\]").should == Char.new("]")
end
it "should parse 'ab' as a concatenation of a and b" do
char1 = Char.new("a")
char2 = Char.new("b")
Parser.parse("ab").should == Concatenation.new(char1, char2)
Parser.parse("ab").should be_a_kind_of(Concatenation)
end
it "should parse [a-z]* as a kleen star of a char class" do
Parser.parse("[a-z]*").should == KleenStar.new(CharacterClass.new("a-z"))
end
it "should parse alternation" do
result = Parser.parse("a|b")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("a"), Char.new("b"))
end
it "should parse correct chars in the alternation" do
result = Parser.parse("x|y")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("x"), Char.new("y"))
end
it "should parse '.|a' as an alternation" do
result = Parser.parse(".|a")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, Char.new("a"))
end
it "should allow a char class in the second position" do
result = Parser.parse(".|[a-z]")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, CharacterClass.new("a-z"))
result.expressions.last.should be_a_kind_of(CharacterClass)
end
it "should allow a plus after a char class" do
result = Parser.parse("[a-z]+")
result.should be_a_kind_of(Plus)
result.should == Plus.new(CharacterClass.new("a-z"))
end
it "should see an escaped plus as a char" do
Parser.parse('\+').should be_a_kind_of(Char)
end
it "should see an escaped plus with a argment in front of it as an escaped plus with a concatenation" do
result = Parser.parse('a\+')
result.should == Concatenation.new(Char.new("a"), Char.new("+"))
end
it "should allow an optional char class" do
result = Parser.parse("[a-z]?")
result.should == OptionalSymbol.new(CharacterClass.new("a-z"))
end
it "should parse with parens" do
result = Parser.parse("([a-z])")
result.should be_a_kind_of(CharacterClass)
end
it "should parse an escaped paren inside parens" do
result = Parser.parse('(\()')
result.should == Char.new("(")
end
it "should allow parens around a concatenation" do
result = Parser.parse("(ab)")
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse matching escaped parens inside a set of parens" do
result = Parser.parse '(\(\))'
result.should == (Char.new("(") + Char.new(")"))
end
it "should parse two sets of parens around each other" do
result = Parser.parse "((ab))"
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse a number" do
result = Parser.parse("9")
result.should == Char.new("9")
end
it "should parse any single non-special char (one that isn't in the regex set)" do
result = Parser.parse("$")
result.should == Char.new("$")
end
it "should parse an escaped or" do
result = Parser.parse('\|')
result.should == Char.new("|")
end
it "should parse an underscore" do
result = Parser.parse("_")
result.should == Char.new("_")
end
it "should parse a char class with one element" do
result = Parser.parse("[a]")
result.should == Char.new("a")
end
it "should parse a char class with one element but a different char" do
result = Parser.parse("[b]")
result.should == Char.new("b")
end
it "should parse an escaped special char inside a character class" do
result = Parser.parse('[\+]')
result.should be_a_kind_of(Char)
result.should == Char.new("+")
end
it "should parse two escaped chars within a char range" do
result = Parser.parse '[\a-\b]'
result.should be_a_kind_of(CharacterClass)
result.should == CharacterClass.new("\\a-\\b")
end
it "should NOT parse an empty char class" do
lambda {
Parser.parse("[]")
}.should raise_error(Parser::ParseError)
end
["+", "?", "*", "[", "]", "\\", "|"].each do |char|
it "should not parse the regex '#{char}'" do
lambda {
Parser.parse("#{char}")
}.should raise_error(Parser::ParseError)
end
end
it "should raise an error if it cannot parse a string" do
lambda {
Parser.parse("[")
}.should raise_error(Parser::ParseError, "could not parse the regex '['")
end
it "should use the correct string name" do
lambda {
Parser.parse("]")
}.should raise_error(Parser::ParseError, "could not parse the regex ']'")
end
it "should allow multiple expressions inside a char class (i.e [a-zA-Z])"
it "should be able to parse multiple ORs (a|b|c)"
it "should be able to parse (a|b)+" do
- pending do
- result = Parser.parse("(a|b)+")
- result.should be_a_kind_of(Plus)
- end
+ result = Parser.parse("(a|b)+")
+ result.should be_a_kind_of(Plus)
end
describe "debugging info" do
it "should have debugging info off by default" do
Parser.new.should_not be_debugging
end
it "should be able to set debugging information" do
p = Parser.new
p.debug = true
p.should be_debugging
end
end
end
end
end
|
smtlaissezfaire/hopcroft
|
c426a4941e5bb708ecf01cadb0f97f528fa490d3
|
Replace parent nodes of parser with left-factorization (necessary for LL grammars which contain left recursion)
|
diff --git a/lib/hopcroft/regex.rb b/lib/hopcroft/regex.rb
index 185ef01..308919c 100644
--- a/lib/hopcroft/regex.rb
+++ b/lib/hopcroft/regex.rb
@@ -1,34 +1,34 @@
require "treetop"
module Hopcroft
module Regex
SPECIAL_CHARS = [
DOT = ".",
PLUS = "+",
QUESTION = "?",
STAR = "*",
OPEN_BRACKET = "[",
CLOSE_BRACKET = "]",
ESCAPE_CHAR = "\\",
ALTERNATION = "|"
]
extend Using
using :Base
using :Char
using :KleenStar
using :Plus
using :Dot
using :CharacterClass
using :OptionalSymbol
using :Concatenation
using :Alternation
-
+ using :SyntaxNodes
using :Parser
def self.compile(from_string)
Parser.parse(from_string)
end
end
end
diff --git a/lib/hopcroft/regex/regex_parser.treetop b/lib/hopcroft/regex/regex_parser.treetop
index ae30193..ed2b2be 100644
--- a/lib/hopcroft/regex/regex_parser.treetop
+++ b/lib/hopcroft/regex/regex_parser.treetop
@@ -1,155 +1,91 @@
module Hopcroft
module Regex
grammar TreetopRegex
- rule a_regex
- expr_with_parens / expr_without_parens
- end
-
- rule expr_with_parens
- "(" expr_with_parens ")" {
- def eval
- expr_with_parens.eval
- end
- }
- /
- expr_without_parens
- end
-
- rule expr_without_parens
- multi_valued_expression
- end
-
- rule multi_valued_expression
- car:single_value_expression cdr:single_value_expression* {
- def eval
- if cdr.elements.any?
- a = [car.eval, cdr.elements.map { |element| element.eval }]
- a.flatten!
- a.inject { |collection, element| collection + element }
- else
- car.eval
- end
- end
- }
- end
-
- rule single_value_expression
- escaped_char / expr_without_escaping
- end
-
- rule expr_without_escaping
- kleen_star / expr_without_kleen_star
- end
-
- rule expr_without_kleen_star
- alternation / expr_without_alternation
- end
-
- rule expr_without_alternation
- plus_expr / expr_without_plus
- end
-
- rule expr_without_plus
- optional_expr / expr_without_optional_expr
- end
-
- rule expr_without_optional_expr
- char_class / dot / unescaped_char
- end
-
- rule alternation
- one:expr_without_alternation "|" two:expr_without_alternation {
- def eval
- Alternation.new(one.eval, two.eval)
- end
- }
- end
-
- rule plus_expr
- expr_without_plus "+" {
- def eval
- Plus.new(expr_without_plus.eval)
- end
- }
- end
-
- rule optional_expr
- expr_without_optional_expr "?" {
- def eval
- OptionalSymbol.new(expr_without_optional_expr.eval)
- end
- }
- end
-
- rule char_class
- "[" char_class_expr "]" {
- def eval
- CharacterClass.new(char_class_expr.eval)
- end
- }
- end
-
- rule char_class_expr
- one:char "-" two:char {
- def eval
- "#{one.text_value}-#{two.text_value}"
- end
- }
- /
- char {
- def eval
- text_value
- end
- }
- end
-
- rule kleen_star
- expr_without_kleen_star "*" {
- def eval
- KleenStar.new(expr_without_kleen_star.eval)
- end
- }
- end
-
+ # expr -> expr "*"
+ # -> expr "+"
+ # -> expr expr # concatenation
+ # -> "(" expr ")"
+ # -> /[a-zA-Z]+/
+ #
+ # after left factorization:
+ #
+ # expr -> "(" expr-prime ")"
+ # -> /[az-A-Z]/ expr-prime
+ #
+ # expr-prime -> ""
+ # -> "*"
+ # -> "+"
+ # -> expr
+ #
+ #
+ # See http://en.wikipedia.org/wiki/Left_recursion#Removing_immediate_left_recursion
+
+ rule regex
+ LEFT_PARENS regex RIGHT_PARENS <SyntaxNodes::ParenthesizedExpression> /
+ leading_expression subexpression <SyntaxNodes::Subexpression>
+ end
+
+ rule subexpression
+ "*" <SyntaxNodes::KleenStar> /
+ "+" <SyntaxNodes::Plus> /
+ "?" <SyntaxNodes::OptionalSymbol> /
+ "|" regex <SyntaxNodes::Alternation> /
+ regex /
+ "" <SyntaxNodes::Epsilon>
+ end
+
+ rule leading_expression
+ dot / character_class / single_char
+ end
+
+ rule character_class
+ LEFT_BRACKET inner_char_class RIGHT_BRACKET <SyntaxNodes::CharClass>
+ end
+
+ rule inner_char_class
+ one:single_char "-" two:single_char <SyntaxNodes::TwoCharClass> /
+ single_char <SyntaxNodes::OneCharCharClass>
+ end
+
rule dot
- "." {
- def eval
- Dot.new
- end
- }
+ "." <SyntaxNodes::Dot>
end
-
- rule char
- unescaped_char / escaped_char
+
+ rule single_char
+ non_special_char / escaped_char
end
-
- rule unescaped_char
- non_special_char {
- def eval
- Char.new(text_value)
- end
- }
+
+ rule non_special_char
+ !("(" / ")" / "[" / "+" / "?" / "+" / "]" / "|" / "*" / "\\") ANY_CHAR <SyntaxNodes::NonSpecialChar>
end
-
+
rule escaped_char
- escape_char any_char {
- def eval
- Char.new(any_char.text_value)
- end
- }
+ ESCAPE any_char:ANY_CHAR <SyntaxNodes::EscapedChar>
end
-
- rule non_special_char
- !("(" / ")" / "[" / "+" / "?" / "+" / "]" / "|" / "*" / "\\") .
- end
-
- rule any_char
+
+ rule ANY_CHAR
.
end
-
- rule escape_char
+
+ rule LEFT_BRACKET
+ "["
+ end
+
+ rule RIGHT_BRACKET
+ "]"
+ end
+
+ rule ESCAPE
"\\"
end
+
+ rule LEFT_PARENS
+ "("
+ end
+
+ rule RIGHT_PARENS
+ ")"
+ end
end
end
end
diff --git a/lib/hopcroft/regex/syntax_nodes.rb b/lib/hopcroft/regex/syntax_nodes.rb
new file mode 100644
index 0000000..1b4979d
--- /dev/null
+++ b/lib/hopcroft/regex/syntax_nodes.rb
@@ -0,0 +1,116 @@
+module Hopcroft
+ module Regex
+ module SyntaxNodes
+ class Base < ::Treetop::Runtime::SyntaxNode; end
+
+ class Subexpression < Base
+ def eval
+ char = leading_expression.eval.call(leading_expression)
+
+ subexpr = subexpression.eval
+
+ if subexpr.respond_to?(:call)
+ subexpr.call(char)
+ elsif subexpr
+ char + subexpr
+ else
+ char
+ end
+ end
+ end
+
+ class ParenthesizedExpression < Base
+ def eval
+ regex.eval
+ end
+ end
+
+ class Plus < Base
+ def eval
+ lambda do |obj|
+ Hopcroft::Regex::Plus.new(obj)
+ end
+ end
+ end
+
+ class KleenStar < Base
+ def eval
+ lambda do |obj|
+ Hopcroft::Regex::KleenStar.new(obj)
+ end
+ end
+ end
+
+ class OptionalSymbol < Base
+ def eval
+ lambda do |obj|
+ Hopcroft::Regex::OptionalSymbol.new(obj)
+ end
+ end
+ end
+
+ class Epsilon < Base
+ def eval
+ # nil
+ end
+ end
+
+ class Alternation < Base
+ def eval
+ lambda do |obj|
+ Regex::Alternation.new(obj, regex.eval)
+ end
+ end
+ end
+
+ class Dot < Base
+ def eval
+ lambda do |obj|
+ Regex::Dot.new
+ end
+ end
+ end
+
+ module NonSpecialChar
+ def eval
+ lambda do |obj|
+ Char.new(obj.text_value)
+ end
+ end
+ end
+
+ class CharClass < Base
+ def eval
+ lambda do
+ CharacterClass.new(inner_char_class.eval.call)
+ end
+ end
+ end
+
+ class TwoCharClass < Base
+ def eval
+ lambda do
+ "#{one.text_value}-#{two.text_value}"
+ end
+ end
+ end
+
+ module OneCharCharClass
+ def eval
+ lambda do
+ text_value
+ end
+ end
+ end
+
+ class EscapedChar < Base
+ def eval
+ lambda do |obj|
+ Char.new(any_char.text_value)
+ end
+ end
+ end
+ end
+ end
+end
+
\ No newline at end of file
diff --git a/spec/hopcoft/regex/parser_spec.rb b/spec/hopcoft/regex/parser_spec.rb
index bc365d1..a2a31b6 100644
--- a/spec/hopcoft/regex/parser_spec.rb
+++ b/spec/hopcoft/regex/parser_spec.rb
@@ -1,254 +1,288 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Regex
describe Parser do
it "should parse 'a' as a Char" do
Parser.parse("a").should == Char.new('a')
end
+
+ it "should parse '(a)' as a Char" do
+ Parser.parse("(a)").should == Char.new("a")
+ end
it "should parse 'b' as a Char" do
Parser.parse("b").should == Char.new("b")
end
-
+
+ it "should parse 'A' as a Char" do
+ Parser.parse("A").should == Char.new("A")
+ end
+
+ it "should parse 'Z' as a Char" do
+ Parser.parse("Z").should == Char.new("Z")
+ end
+
+ it "should parse '0' as a Char" do
+ Parser.parse("0").should == Char.new("0")
+ end
+
+ it "should parse '9' as a Char" do
+ Parser.parse("9").should == Char.new("9")
+ end
+
it "should parse 'ab' as two chars" do
- Parser.parse("ab").should == (Char.new("a") + Char.new("b"))
+ result = Parser.parse("ab")
+ result.should == (Char.new("a") + Char.new("b"))
+ result.should be_a_kind_of(Concatenation)
end
-
+
+ it "should parse 'bc' as two chars" do
+ result = Parser.parse("bc")
+ result.should == (Char.new("b") + Char.new("c"))
+ result.should be_a_kind_of(Concatenation)
+ end
+
it "should parse a '.' as a Dot" do
Parser.parse(".").should == Dot.new
end
-
+
it "should parse a '\.' as a char dot" do
Parser.parse('\.').should == Char.new(".")
end
-
+
it "should parse '\..' as an escaped char + a dot" do
Parser.parse("\\..").should == (Char.new(".") + Dot.new)
end
-
+
it "should parse 'a*' as a kleen star" do
Parser.parse("a*").should == KleenStar.new(Char.new("a"))
end
-
+
it "should parse 'b*' as a kleen star" do
Parser.parse("b*").should == KleenStar.new(Char.new("b"))
end
-
+
it "should parse '\*' as the star char" do
Parser.parse("\\*").should == Char.new("*")
end
-
+
it "should parse 'a\*' as a followed by a char" do
Parser.parse("a\\*").should == (Char.new("a") + Char.new("*"))
end
-
+
it "should parse a? as an optional a" do
Parser.parse("a?").should == OptionalSymbol.new(Char.new("a"))
end
-
+
it "should parse b? as an optional b" do
Parser.parse("b?").should == OptionalSymbol.new(Char.new("b"))
end
-
+
it "should parse an escaped question mark as a char" do
Parser.parse("\\?").should == Char.new("?")
end
-
+
it "should parse a plus" do
Parser.parse("a+").should == Plus.new(Char.new("a"))
end
-
+
it "should parse 'b+'" do
Parser.parse("b+").should == Plus.new(Char.new("b"))
end
-
+
it "should parse an escaped plus" do
Parser.parse("\\+").should == Char.new("+")
end
-
+
it "should parse [a-z] as a character class" do
Parser.parse("[a-z]").should == CharacterClass.new("a-z")
end
-
+
it "should parse [b-c] as a character class" do
Parser.parse("[b-c]").should == CharacterClass.new("b-c")
end
-
+
it "should parse \ as an open bracket char" do
Parser.parse("\\[").should == Char.new("[")
end
-
+
it "should parse \] as a closed bracket char" do
Parser.parse("\\]").should == Char.new("]")
end
-
+
it "should parse 'ab' as a concatenation of a and b" do
char1 = Char.new("a")
char2 = Char.new("b")
-
+
Parser.parse("ab").should == Concatenation.new(char1, char2)
+ Parser.parse("ab").should be_a_kind_of(Concatenation)
end
-
+
it "should parse [a-z]* as a kleen star of a char class" do
Parser.parse("[a-z]*").should == KleenStar.new(CharacterClass.new("a-z"))
end
-
+
it "should parse alternation" do
result = Parser.parse("a|b")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("a"), Char.new("b"))
end
-
+
it "should parse correct chars in the alternation" do
result = Parser.parse("x|y")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("x"), Char.new("y"))
end
-
+
it "should parse '.|a' as an alternation" do
result = Parser.parse(".|a")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, Char.new("a"))
end
-
+
it "should allow a char class in the second position" do
result = Parser.parse(".|[a-z]")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, CharacterClass.new("a-z"))
result.expressions.last.should be_a_kind_of(CharacterClass)
end
-
+
it "should allow a plus after a char class" do
result = Parser.parse("[a-z]+")
result.should be_a_kind_of(Plus)
result.should == Plus.new(CharacterClass.new("a-z"))
end
-
+
it "should see an escaped plus as a char" do
Parser.parse('\+').should be_a_kind_of(Char)
end
-
+
it "should see an escaped plus with a argment in front of it as an escaped plus with a concatenation" do
result = Parser.parse('a\+')
result.should == Concatenation.new(Char.new("a"), Char.new("+"))
end
-
+
it "should allow an optional char class" do
result = Parser.parse("[a-z]?")
result.should == OptionalSymbol.new(CharacterClass.new("a-z"))
end
-
+
it "should parse with parens" do
result = Parser.parse("([a-z])")
result.should be_a_kind_of(CharacterClass)
end
-
+
it "should parse an escaped paren inside parens" do
- result = Parser.parse("(\\()")
+ result = Parser.parse('(\()')
result.should == Char.new("(")
end
-
+
it "should allow parens around a concatenation" do
result = Parser.parse("(ab)")
result.should == (Char.new("a") + Char.new("b"))
end
-
+
it "should parse matching escaped parens inside a set of parens" do
result = Parser.parse '(\(\))'
result.should == (Char.new("(") + Char.new(")"))
end
-
+
it "should parse two sets of parens around each other" do
result = Parser.parse "((ab))"
result.should == (Char.new("a") + Char.new("b"))
end
-
+
it "should parse a number" do
result = Parser.parse("9")
result.should == Char.new("9")
end
-
+
it "should parse any single non-special char (one that isn't in the regex set)" do
result = Parser.parse("$")
result.should == Char.new("$")
end
-
+
it "should parse an escaped or" do
result = Parser.parse('\|')
result.should == Char.new("|")
end
-
+
it "should parse an underscore" do
result = Parser.parse("_")
result.should == Char.new("_")
end
-
+
it "should parse a char class with one element" do
result = Parser.parse("[a]")
result.should == Char.new("a")
end
-
+
+ it "should parse a char class with one element but a different char" do
+ result = Parser.parse("[b]")
+ result.should == Char.new("b")
+ end
+
it "should parse an escaped special char inside a character class" do
result = Parser.parse('[\+]')
result.should be_a_kind_of(Char)
result.should == Char.new("+")
end
-
+
it "should parse two escaped chars within a char range" do
result = Parser.parse '[\a-\b]'
result.should be_a_kind_of(CharacterClass)
result.should == CharacterClass.new("\\a-\\b")
end
-
+
it "should NOT parse an empty char class" do
lambda {
Parser.parse("[]")
}.should raise_error(Parser::ParseError)
end
-
+
["+", "?", "*", "[", "]", "\\", "|"].each do |char|
it "should not parse the regex '#{char}'" do
lambda {
Parser.parse("#{char}")
}.should raise_error(Parser::ParseError)
end
end
-
+
it "should raise an error if it cannot parse a string" do
lambda {
Parser.parse("[")
}.should raise_error(Parser::ParseError, "could not parse the regex '['")
end
-
+
it "should use the correct string name" do
lambda {
Parser.parse("]")
}.should raise_error(Parser::ParseError, "could not parse the regex ']'")
end
-
+
it "should allow multiple expressions inside a char class (i.e [a-zA-Z])"
-
+
it "should be able to parse multiple ORs (a|b|c)"
it "should be able to parse (a|b)+" do
pending do
result = Parser.parse("(a|b)+")
result.should be_a_kind_of(Plus)
end
end
describe "debugging info" do
it "should have debugging info off by default" do
Parser.new.should_not be_debugging
end
it "should be able to set debugging information" do
p = Parser.new
p.debug = true
p.should be_debugging
end
end
end
end
end
|
smtlaissezfaire/hopcroft
|
1484a92da2cfdc9cedda6be9d9f1f86f164d9686
|
Output debugging info for a parse that fails
|
diff --git a/lib/hopcroft/regex/parser.rb b/lib/hopcroft/regex/parser.rb
index a3aae4a..2a21d61 100644
--- a/lib/hopcroft/regex/parser.rb
+++ b/lib/hopcroft/regex/parser.rb
@@ -1,30 +1,39 @@
require "treetop"
Treetop.load File.dirname(__FILE__) + "/regex_parser"
module Hopcroft
module Regex
class Parser
class ParseError < StandardError; end
- def self.parse(str)
- new.parse_and_eval(str)
+ def self.parse(str, debugging = false)
+ obj = new
+ obj.debug = debugging
+ obj.parse_and_eval(str)
end
def initialize
@parser = Regex::TreetopRegexParser.new
end
def parse(str)
@parser.parse(str)
end
-
+
+ def debugging?
+ @debug ? true : false
+ end
+
+ attr_writer :debug
+
def parse_and_eval(str)
if parse = parse(str)
parse.eval
else
+ puts @parser.inspect if debugging?
raise ParseError, "could not parse the regex '#{str}'"
end
end
end
end
end
diff --git a/spec/hopcoft/regex/parser_spec.rb b/spec/hopcoft/regex/parser_spec.rb
index a254999..bc365d1 100644
--- a/spec/hopcoft/regex/parser_spec.rb
+++ b/spec/hopcoft/regex/parser_spec.rb
@@ -1,242 +1,254 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Regex
describe Parser do
it "should parse 'a' as a Char" do
Parser.parse("a").should == Char.new('a')
end
it "should parse 'b' as a Char" do
Parser.parse("b").should == Char.new("b")
end
it "should parse 'ab' as two chars" do
Parser.parse("ab").should == (Char.new("a") + Char.new("b"))
end
it "should parse a '.' as a Dot" do
Parser.parse(".").should == Dot.new
end
it "should parse a '\.' as a char dot" do
Parser.parse('\.').should == Char.new(".")
end
it "should parse '\..' as an escaped char + a dot" do
Parser.parse("\\..").should == (Char.new(".") + Dot.new)
end
it "should parse 'a*' as a kleen star" do
Parser.parse("a*").should == KleenStar.new(Char.new("a"))
end
it "should parse 'b*' as a kleen star" do
Parser.parse("b*").should == KleenStar.new(Char.new("b"))
end
it "should parse '\*' as the star char" do
Parser.parse("\\*").should == Char.new("*")
end
it "should parse 'a\*' as a followed by a char" do
Parser.parse("a\\*").should == (Char.new("a") + Char.new("*"))
end
it "should parse a? as an optional a" do
Parser.parse("a?").should == OptionalSymbol.new(Char.new("a"))
end
it "should parse b? as an optional b" do
Parser.parse("b?").should == OptionalSymbol.new(Char.new("b"))
end
it "should parse an escaped question mark as a char" do
Parser.parse("\\?").should == Char.new("?")
end
it "should parse a plus" do
Parser.parse("a+").should == Plus.new(Char.new("a"))
end
it "should parse 'b+'" do
Parser.parse("b+").should == Plus.new(Char.new("b"))
end
it "should parse an escaped plus" do
Parser.parse("\\+").should == Char.new("+")
end
it "should parse [a-z] as a character class" do
Parser.parse("[a-z]").should == CharacterClass.new("a-z")
end
it "should parse [b-c] as a character class" do
Parser.parse("[b-c]").should == CharacterClass.new("b-c")
end
it "should parse \ as an open bracket char" do
Parser.parse("\\[").should == Char.new("[")
end
it "should parse \] as a closed bracket char" do
Parser.parse("\\]").should == Char.new("]")
end
it "should parse 'ab' as a concatenation of a and b" do
char1 = Char.new("a")
char2 = Char.new("b")
Parser.parse("ab").should == Concatenation.new(char1, char2)
end
it "should parse [a-z]* as a kleen star of a char class" do
Parser.parse("[a-z]*").should == KleenStar.new(CharacterClass.new("a-z"))
end
it "should parse alternation" do
result = Parser.parse("a|b")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("a"), Char.new("b"))
end
it "should parse correct chars in the alternation" do
result = Parser.parse("x|y")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("x"), Char.new("y"))
end
it "should parse '.|a' as an alternation" do
result = Parser.parse(".|a")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, Char.new("a"))
end
it "should allow a char class in the second position" do
result = Parser.parse(".|[a-z]")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, CharacterClass.new("a-z"))
result.expressions.last.should be_a_kind_of(CharacterClass)
end
it "should allow a plus after a char class" do
result = Parser.parse("[a-z]+")
result.should be_a_kind_of(Plus)
result.should == Plus.new(CharacterClass.new("a-z"))
end
it "should see an escaped plus as a char" do
Parser.parse('\+').should be_a_kind_of(Char)
end
it "should see an escaped plus with a argment in front of it as an escaped plus with a concatenation" do
result = Parser.parse('a\+')
result.should == Concatenation.new(Char.new("a"), Char.new("+"))
end
it "should allow an optional char class" do
result = Parser.parse("[a-z]?")
result.should == OptionalSymbol.new(CharacterClass.new("a-z"))
end
it "should parse with parens" do
result = Parser.parse("([a-z])")
result.should be_a_kind_of(CharacterClass)
end
it "should parse an escaped paren inside parens" do
result = Parser.parse("(\\()")
result.should == Char.new("(")
end
it "should allow parens around a concatenation" do
result = Parser.parse("(ab)")
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse matching escaped parens inside a set of parens" do
result = Parser.parse '(\(\))'
result.should == (Char.new("(") + Char.new(")"))
end
it "should parse two sets of parens around each other" do
result = Parser.parse "((ab))"
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse a number" do
result = Parser.parse("9")
result.should == Char.new("9")
end
it "should parse any single non-special char (one that isn't in the regex set)" do
result = Parser.parse("$")
result.should == Char.new("$")
end
it "should parse an escaped or" do
result = Parser.parse('\|')
result.should == Char.new("|")
end
it "should parse an underscore" do
result = Parser.parse("_")
result.should == Char.new("_")
end
it "should parse a char class with one element" do
result = Parser.parse("[a]")
result.should == Char.new("a")
end
it "should parse an escaped special char inside a character class" do
result = Parser.parse('[\+]')
result.should be_a_kind_of(Char)
result.should == Char.new("+")
end
it "should parse two escaped chars within a char range" do
result = Parser.parse '[\a-\b]'
result.should be_a_kind_of(CharacterClass)
result.should == CharacterClass.new("\\a-\\b")
end
it "should NOT parse an empty char class" do
lambda {
Parser.parse("[]")
}.should raise_error(Parser::ParseError)
end
["+", "?", "*", "[", "]", "\\", "|"].each do |char|
it "should not parse the regex '#{char}'" do
lambda {
Parser.parse("#{char}")
}.should raise_error(Parser::ParseError)
end
end
it "should raise an error if it cannot parse a string" do
lambda {
Parser.parse("[")
}.should raise_error(Parser::ParseError, "could not parse the regex '['")
end
it "should use the correct string name" do
lambda {
Parser.parse("]")
}.should raise_error(Parser::ParseError, "could not parse the regex ']'")
end
it "should allow multiple expressions inside a char class (i.e [a-zA-Z])"
it "should be able to parse multiple ORs (a|b|c)"
it "should be able to parse (a|b)+" do
pending do
result = Parser.parse("(a|b)+")
result.should be_a_kind_of(Plus)
end
end
+
+ describe "debugging info" do
+ it "should have debugging info off by default" do
+ Parser.new.should_not be_debugging
+ end
+
+ it "should be able to set debugging information" do
+ p = Parser.new
+ p.debug = true
+ p.should be_debugging
+ end
+ end
end
end
end
|
smtlaissezfaire/hopcroft
|
832901cf0bf5a4a511b25c78c4acc4f42b2c3d65
|
Add a pending spec
|
diff --git a/spec/hopcoft/regex/parser_spec.rb b/spec/hopcoft/regex/parser_spec.rb
index 856b20e..a254999 100644
--- a/spec/hopcoft/regex/parser_spec.rb
+++ b/spec/hopcoft/regex/parser_spec.rb
@@ -1,235 +1,242 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Regex
describe Parser do
it "should parse 'a' as a Char" do
Parser.parse("a").should == Char.new('a')
end
it "should parse 'b' as a Char" do
Parser.parse("b").should == Char.new("b")
end
it "should parse 'ab' as two chars" do
Parser.parse("ab").should == (Char.new("a") + Char.new("b"))
end
it "should parse a '.' as a Dot" do
Parser.parse(".").should == Dot.new
end
it "should parse a '\.' as a char dot" do
Parser.parse('\.').should == Char.new(".")
end
it "should parse '\..' as an escaped char + a dot" do
Parser.parse("\\..").should == (Char.new(".") + Dot.new)
end
it "should parse 'a*' as a kleen star" do
Parser.parse("a*").should == KleenStar.new(Char.new("a"))
end
it "should parse 'b*' as a kleen star" do
Parser.parse("b*").should == KleenStar.new(Char.new("b"))
end
it "should parse '\*' as the star char" do
Parser.parse("\\*").should == Char.new("*")
end
it "should parse 'a\*' as a followed by a char" do
Parser.parse("a\\*").should == (Char.new("a") + Char.new("*"))
end
it "should parse a? as an optional a" do
Parser.parse("a?").should == OptionalSymbol.new(Char.new("a"))
end
it "should parse b? as an optional b" do
Parser.parse("b?").should == OptionalSymbol.new(Char.new("b"))
end
it "should parse an escaped question mark as a char" do
Parser.parse("\\?").should == Char.new("?")
end
it "should parse a plus" do
Parser.parse("a+").should == Plus.new(Char.new("a"))
end
it "should parse 'b+'" do
Parser.parse("b+").should == Plus.new(Char.new("b"))
end
it "should parse an escaped plus" do
Parser.parse("\\+").should == Char.new("+")
end
it "should parse [a-z] as a character class" do
Parser.parse("[a-z]").should == CharacterClass.new("a-z")
end
it "should parse [b-c] as a character class" do
Parser.parse("[b-c]").should == CharacterClass.new("b-c")
end
it "should parse \ as an open bracket char" do
Parser.parse("\\[").should == Char.new("[")
end
it "should parse \] as a closed bracket char" do
Parser.parse("\\]").should == Char.new("]")
end
it "should parse 'ab' as a concatenation of a and b" do
char1 = Char.new("a")
char2 = Char.new("b")
Parser.parse("ab").should == Concatenation.new(char1, char2)
end
it "should parse [a-z]* as a kleen star of a char class" do
Parser.parse("[a-z]*").should == KleenStar.new(CharacterClass.new("a-z"))
end
it "should parse alternation" do
result = Parser.parse("a|b")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("a"), Char.new("b"))
end
it "should parse correct chars in the alternation" do
result = Parser.parse("x|y")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Char.new("x"), Char.new("y"))
end
it "should parse '.|a' as an alternation" do
result = Parser.parse(".|a")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, Char.new("a"))
end
it "should allow a char class in the second position" do
result = Parser.parse(".|[a-z]")
result.should be_a_kind_of(Alternation)
result.should == Alternation.new(Dot.new, CharacterClass.new("a-z"))
result.expressions.last.should be_a_kind_of(CharacterClass)
end
it "should allow a plus after a char class" do
result = Parser.parse("[a-z]+")
result.should be_a_kind_of(Plus)
result.should == Plus.new(CharacterClass.new("a-z"))
end
it "should see an escaped plus as a char" do
Parser.parse('\+').should be_a_kind_of(Char)
end
it "should see an escaped plus with a argment in front of it as an escaped plus with a concatenation" do
result = Parser.parse('a\+')
result.should == Concatenation.new(Char.new("a"), Char.new("+"))
end
it "should allow an optional char class" do
result = Parser.parse("[a-z]?")
result.should == OptionalSymbol.new(CharacterClass.new("a-z"))
end
it "should parse with parens" do
result = Parser.parse("([a-z])")
result.should be_a_kind_of(CharacterClass)
end
it "should parse an escaped paren inside parens" do
result = Parser.parse("(\\()")
result.should == Char.new("(")
end
it "should allow parens around a concatenation" do
result = Parser.parse("(ab)")
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse matching escaped parens inside a set of parens" do
result = Parser.parse '(\(\))'
result.should == (Char.new("(") + Char.new(")"))
end
it "should parse two sets of parens around each other" do
result = Parser.parse "((ab))"
result.should == (Char.new("a") + Char.new("b"))
end
it "should parse a number" do
result = Parser.parse("9")
result.should == Char.new("9")
end
it "should parse any single non-special char (one that isn't in the regex set)" do
result = Parser.parse("$")
result.should == Char.new("$")
end
it "should parse an escaped or" do
result = Parser.parse('\|')
result.should == Char.new("|")
end
it "should parse an underscore" do
result = Parser.parse("_")
result.should == Char.new("_")
end
it "should parse a char class with one element" do
result = Parser.parse("[a]")
result.should == Char.new("a")
end
it "should parse an escaped special char inside a character class" do
result = Parser.parse('[\+]')
result.should be_a_kind_of(Char)
result.should == Char.new("+")
end
it "should parse two escaped chars within a char range" do
result = Parser.parse '[\a-\b]'
result.should be_a_kind_of(CharacterClass)
result.should == CharacterClass.new("\\a-\\b")
end
it "should NOT parse an empty char class" do
lambda {
Parser.parse("[]")
}.should raise_error(Parser::ParseError)
end
["+", "?", "*", "[", "]", "\\", "|"].each do |char|
it "should not parse the regex '#{char}'" do
lambda {
Parser.parse("#{char}")
}.should raise_error(Parser::ParseError)
end
end
it "should raise an error if it cannot parse a string" do
lambda {
Parser.parse("[")
}.should raise_error(Parser::ParseError, "could not parse the regex '['")
end
it "should use the correct string name" do
lambda {
Parser.parse("]")
}.should raise_error(Parser::ParseError, "could not parse the regex ']'")
end
it "should allow multiple expressions inside a char class (i.e [a-zA-Z])"
it "should be able to parse multiple ORs (a|b|c)"
+
+ it "should be able to parse (a|b)+" do
+ pending do
+ result = Parser.parse("(a|b)+")
+ result.should be_a_kind_of(Plus)
+ end
+ end
end
end
end
|
smtlaissezfaire/hopcroft
|
06e1eb28768e72e9ad9a6b12c30f6ba0131582eb
|
Remove unnecessary 'do's from treetop grammar
|
diff --git a/lib/hopcroft/regex/regex_parser.treetop b/lib/hopcroft/regex/regex_parser.treetop
index 367406d..ae30193 100644
--- a/lib/hopcroft/regex/regex_parser.treetop
+++ b/lib/hopcroft/regex/regex_parser.treetop
@@ -1,155 +1,155 @@
module Hopcroft
module Regex
- grammar TreetopRegex do
- rule a_regex do
+ grammar TreetopRegex
+ rule a_regex
expr_with_parens / expr_without_parens
end
- rule expr_with_parens do
+ rule expr_with_parens
"(" expr_with_parens ")" {
def eval
expr_with_parens.eval
end
}
/
expr_without_parens
end
rule expr_without_parens
multi_valued_expression
end
- rule multi_valued_expression do
+ rule multi_valued_expression
car:single_value_expression cdr:single_value_expression* {
def eval
if cdr.elements.any?
a = [car.eval, cdr.elements.map { |element| element.eval }]
a.flatten!
a.inject { |collection, element| collection + element }
else
car.eval
end
end
}
end
rule single_value_expression
escaped_char / expr_without_escaping
end
- rule expr_without_escaping do
+ rule expr_without_escaping
kleen_star / expr_without_kleen_star
end
- rule expr_without_kleen_star do
+ rule expr_without_kleen_star
alternation / expr_without_alternation
end
- rule expr_without_alternation do
+ rule expr_without_alternation
plus_expr / expr_without_plus
end
- rule expr_without_plus do
+ rule expr_without_plus
optional_expr / expr_without_optional_expr
end
- rule expr_without_optional_expr do
+ rule expr_without_optional_expr
char_class / dot / unescaped_char
end
- rule alternation do
+ rule alternation
one:expr_without_alternation "|" two:expr_without_alternation {
def eval
Alternation.new(one.eval, two.eval)
end
}
end
- rule plus_expr do
+ rule plus_expr
expr_without_plus "+" {
def eval
Plus.new(expr_without_plus.eval)
end
}
end
- rule optional_expr do
+ rule optional_expr
expr_without_optional_expr "?" {
def eval
OptionalSymbol.new(expr_without_optional_expr.eval)
end
}
end
- rule char_class do
+ rule char_class
"[" char_class_expr "]" {
def eval
CharacterClass.new(char_class_expr.eval)
end
}
end
- rule char_class_expr do
+ rule char_class_expr
one:char "-" two:char {
def eval
"#{one.text_value}-#{two.text_value}"
end
}
/
char {
def eval
text_value
end
}
end
- rule kleen_star do
+ rule kleen_star
expr_without_kleen_star "*" {
def eval
KleenStar.new(expr_without_kleen_star.eval)
end
}
end
- rule dot do
+ rule dot
"." {
def eval
Dot.new
end
}
end
- rule char do
+ rule char
unescaped_char / escaped_char
end
- rule unescaped_char do
+ rule unescaped_char
non_special_char {
def eval
Char.new(text_value)
end
}
end
- rule escaped_char do
+ rule escaped_char
escape_char any_char {
def eval
Char.new(any_char.text_value)
end
}
end
- rule non_special_char do
+ rule non_special_char
!("(" / ")" / "[" / "+" / "?" / "+" / "]" / "|" / "*" / "\\") .
end
- rule any_char do
+ rule any_char
.
end
- rule escape_char do
+ rule escape_char
"\\"
end
end
end
end
|
smtlaissezfaire/hopcroft
|
5816a56f3684a7f9f3fa71a828acde679d7cf546
|
Add DFA transition table. Refactor TransitionTable to DFA
|
diff --git a/lib/hopcroft/machine.rb b/lib/hopcroft/machine.rb
index b6c94d3..35c207f 100644
--- a/lib/hopcroft/machine.rb
+++ b/lib/hopcroft/machine.rb
@@ -1,14 +1,16 @@
module Hopcroft
module Machine
extend Using
+ using :TransitionTable
using :NfaTransitionTable
+ using :DfaTransitionTable
using :State
using :Transition
using :StateMachine
using :EpsilonTransition
using :AnyCharTransition
using :TableConverter
using :TableDisplayer
end
end
diff --git a/lib/hopcroft/machine/dfa_transition_table.rb b/lib/hopcroft/machine/dfa_transition_table.rb
new file mode 100644
index 0000000..1f1b03c
--- /dev/null
+++ b/lib/hopcroft/machine/dfa_transition_table.rb
@@ -0,0 +1,25 @@
+module Hopcroft
+ module Machine
+ class DfaTransitionTable < TransitionTable
+ class DuplicateStateError < StandardError; end
+
+ def add_state_change(from, to, sym)
+ self[from] ||= {}
+ raise DuplicateStateError if self[from][sym]
+ self[from][sym] = to
+ end
+
+ def has_state_change?(from, to, sym)
+ self[from] && self[from][sym] && self[from][sym] == to ? true : false
+ end
+
+ def target_for(state, sym)
+ self[state] && self[state][sym] ? self[state][sym] : nil
+ end
+
+ alias_method :initial_state, :start_state
+ alias_method :initial_states, :start_state
+ alias_method :next_transitions, :target_for
+ end
+ end
+end
diff --git a/lib/hopcroft/machine/nfa_transition_table.rb b/lib/hopcroft/machine/nfa_transition_table.rb
index dc6b95a..4cfa29e 100644
--- a/lib/hopcroft/machine/nfa_transition_table.rb
+++ b/lib/hopcroft/machine/nfa_transition_table.rb
@@ -1,98 +1,76 @@
module Hopcroft
module Machine
- class NfaTransitionTable < Hash
- class MissingStartState < StandardError; end
-
- attr_reader :start_state
-
+ class NfaTransitionTable < TransitionTable
def start_state=(start_state)
self[start_state] ||= {}
- @start_state = start_state
+ super
end
-
+
# Create a transition without marking appropriate start states
def add_state_change(from_state, to_state, transition_symbol)
sym = transition_symbol
self[from_state] ||= {}
self[from_state][sym] ||= []
self[from_state][sym] << to_state
end
def has_state_change?(from_state, to_state, transition_symbol)
self[from_state] &&
self[from_state][transition_symbol] &&
self[from_state][transition_symbol].include?(to_state)
end
def targets_for(state, transition_sym)
find_targets_matching(state, transition_sym) do |target|
epsilon_states_following(target)
end
end
def initial_states
[start_state] + epsilon_states_following(start_state)
end
def next_transitions(states, sym)
states.map { |s| targets_for(s, sym) }.compact.flatten
end
- def matches?(input_array, current_states = initial_states)
- raise MissingStartState unless start_state
-
- input_array.each do |sym|
- current_states = next_transitions(current_states, sym.to_sym)
- end
-
- current_states.any? { |state| state.final? }
- end
-
- def inspect
- TableDisplayer.new(self).to_s
- end
-
- def to_hash
- Hash.new(self)
- end
-
private
def epsilon_states_following(state)
find_targets_matching(state, EpsilonTransition) do |target|
epsilon_states_following(target)
end
end
def find_targets_matching(state, transition_sym, &recursion_block)
returning Array.new do |a|
direct_targets = find_targets_for(state, transition_sym)
append a, direct_targets
direct_targets.each do |target|
append a, recursion_block.call(target)
end
end
end
def find_targets_for(state, transition_sym)
returning Array.new do |a|
if state = self[state]
if state[transition_sym]
append a, state[transition_sym]
end
if state[AnyCharTransition] && transition_sym != EpsilonTransition
append a, state[AnyCharTransition]
end
end
end
end
def append(array1, array2)
array1.push *array2
end
end
end
end
diff --git a/lib/hopcroft/machine/transition_table.rb b/lib/hopcroft/machine/transition_table.rb
new file mode 100644
index 0000000..dfff988
--- /dev/null
+++ b/lib/hopcroft/machine/transition_table.rb
@@ -0,0 +1,36 @@
+module Hopcroft
+ module Machine
+ class TransitionTable < Hash
+ class MissingStartState < StandardError; end
+
+ attr_accessor :start_state
+
+ def add_state_change(from, to, sym)
+ self[from] ||= {}
+ self[from][sym] = to
+ end
+
+ def has_state_change?(from, to, sym)
+ self[from] && self[from][sym]
+ end
+
+ def matches?(input_array, current_states = initial_states)
+ raise MissingStartState unless start_state
+
+ input_array.each do |sym|
+ current_states = next_transitions(current_states, sym.to_sym)
+ end
+
+ current_states.any? { |state| state.final? }
+ end
+
+ def to_hash
+ Hash.new(self)
+ end
+
+ def inspect
+ TableDisplayer.new(self).to_s
+ end
+ end
+ end
+end
\ No newline at end of file
diff --git a/spec/hopcoft/machine/dfa_transition_table_spec.rb b/spec/hopcoft/machine/dfa_transition_table_spec.rb
new file mode 100644
index 0000000..4771332
--- /dev/null
+++ b/spec/hopcoft/machine/dfa_transition_table_spec.rb
@@ -0,0 +1,115 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Machine
+ describe DfaTransitionTable do
+ before do
+ @table = DfaTransitionTable.new
+ @state = State.new
+ end
+
+ it "should have the start state as assignable" do
+ @table.start_state = @state
+ @table.start_state.should equal(@state)
+ end
+
+ describe "adding state changes" do
+ before do
+ @state_two = State.new
+ end
+
+ it "should be able to add a state change with a symbol" do
+ @table.add_state_change(@state, @state_two, :symbol)
+ @table.has_state_change?(@state, @state_two, :symbol).should be_true
+ end
+
+ it "should not have a state change if none are provided" do
+ @table.has_state_change?(@state, @state_two, :symbol).should be_false
+ end
+
+ it "should not match the state change if with a different sym" do
+ @table.add_state_change(@state, @state_two, :symbol)
+ @table.has_state_change?(@state, @state_two, :bar).should be_false
+ end
+
+ it "should not match the state change with a different starting state" do
+ @table.add_state_change(@state, @state_two, :symbol)
+ @table.has_state_change?(mock('different state'), @state_two, :symbol).should be_false
+ end
+
+ it "should not match the state change with a different finishing state" do
+ @table.add_state_change(@state, @state_two, :symbol)
+ @table.has_state_change?(@state, mock('a different state'), :symbol).should be_false
+ end
+
+ it "should raise an error if a state change for the state & symbol has already been provided" do
+ @table.add_state_change(@state, @state_two, :symbol)
+
+ lambda {
+ @table.add_state_change(@state, mock("another target"), :symbol)
+ }.should raise_error(DfaTransitionTable::DuplicateStateError)
+ end
+ end
+
+ describe "target_for" do
+ before do
+ @state_two = State.new
+ end
+
+ it "should be the to symbol of the state change" do
+ @table.add_state_change(@state, @state_two, :symbol)
+ @table.target_for(@state, :symbol).should == @state_two
+ end
+
+ it "should return nil if it cannot find the state" do
+ @table.add_state_change(@state, @state_two, :symbol)
+ @table.target_for(mock("a different state"), :symbol).should be_nil
+ end
+
+ it "should return nil if it cannot find the symbol" do
+ @table.add_state_change(@state, @state_two, :symbol)
+ @table.target_for(@state, :foo).should be_nil
+ end
+ end
+
+ describe "to_hash" do
+ it "should return a hash" do
+ @table.to_hash.should be_a_kind_of(Hash)
+ end
+
+ it "should return a hash constructed from the table" do
+ Hash.should_receive(:new).with(@table)
+ @table.to_hash
+ end
+ end
+
+ describe "initial_states" do
+ it "should be the start state" do
+ @table.start_state = @state
+ @table.initial_state.should equal(@state)
+ end
+ end
+
+ describe "next_transitions" do
+ it "should be an alias for target_for" do
+ @table.method(:next_transitions).should == @table.method(:target_for)
+ end
+ end
+
+ describe "matches?" do
+ it "should raise an error if there is no start state" do
+ lambda {
+ @table.matches?("foo")
+ }.should raise_error(DfaTransitionTable::MissingStartState)
+ end
+ end
+
+ describe "inspect" do
+ it "should call TableDisplayer" do
+ TableDisplayer.should_receive(:new)
+ @table.inspect
+ end
+ end
+ end
+ end
+end
\ No newline at end of file
|
smtlaissezfaire/hopcroft
|
b2a09907efe5120853a40a24b0856ad98a2c7507
|
Rename TransitionTable => NfaTransitionTable
|
diff --git a/lib/hopcroft/machine.rb b/lib/hopcroft/machine.rb
index c1529e7..b6c94d3 100644
--- a/lib/hopcroft/machine.rb
+++ b/lib/hopcroft/machine.rb
@@ -1,14 +1,14 @@
module Hopcroft
module Machine
extend Using
- using :TransitionTable
+ using :NfaTransitionTable
using :State
using :Transition
using :StateMachine
using :EpsilonTransition
using :AnyCharTransition
using :TableConverter
using :TableDisplayer
end
end
diff --git a/lib/hopcroft/machine/transition_table.rb b/lib/hopcroft/machine/nfa_transition_table.rb
similarity index 98%
rename from lib/hopcroft/machine/transition_table.rb
rename to lib/hopcroft/machine/nfa_transition_table.rb
index 615862b..dc6b95a 100644
--- a/lib/hopcroft/machine/transition_table.rb
+++ b/lib/hopcroft/machine/nfa_transition_table.rb
@@ -1,98 +1,98 @@
module Hopcroft
module Machine
- class TransitionTable < Hash
+ class NfaTransitionTable < Hash
class MissingStartState < StandardError; end
attr_reader :start_state
def start_state=(start_state)
self[start_state] ||= {}
@start_state = start_state
end
# Create a transition without marking appropriate start states
def add_state_change(from_state, to_state, transition_symbol)
sym = transition_symbol
self[from_state] ||= {}
self[from_state][sym] ||= []
self[from_state][sym] << to_state
end
def has_state_change?(from_state, to_state, transition_symbol)
self[from_state] &&
self[from_state][transition_symbol] &&
self[from_state][transition_symbol].include?(to_state)
end
def targets_for(state, transition_sym)
find_targets_matching(state, transition_sym) do |target|
epsilon_states_following(target)
end
end
def initial_states
[start_state] + epsilon_states_following(start_state)
end
def next_transitions(states, sym)
states.map { |s| targets_for(s, sym) }.compact.flatten
end
def matches?(input_array, current_states = initial_states)
raise MissingStartState unless start_state
input_array.each do |sym|
current_states = next_transitions(current_states, sym.to_sym)
end
current_states.any? { |state| state.final? }
end
def inspect
TableDisplayer.new(self).to_s
end
def to_hash
Hash.new(self)
end
private
def epsilon_states_following(state)
find_targets_matching(state, EpsilonTransition) do |target|
epsilon_states_following(target)
end
end
def find_targets_matching(state, transition_sym, &recursion_block)
returning Array.new do |a|
direct_targets = find_targets_for(state, transition_sym)
append a, direct_targets
direct_targets.each do |target|
append a, recursion_block.call(target)
end
end
end
def find_targets_for(state, transition_sym)
returning Array.new do |a|
if state = self[state]
if state[transition_sym]
append a, state[transition_sym]
end
if state[AnyCharTransition] && transition_sym != EpsilonTransition
append a, state[AnyCharTransition]
end
end
end
end
def append(array1, array2)
array1.push *array2
end
end
end
end
diff --git a/lib/hopcroft/machine/state_machine.rb b/lib/hopcroft/machine/state_machine.rb
index 7bda36d..a762a3c 100644
--- a/lib/hopcroft/machine/state_machine.rb
+++ b/lib/hopcroft/machine/state_machine.rb
@@ -1,42 +1,42 @@
module Hopcroft
module Machine
class StateMachine
def initialize(start_state = State.new)
@start_state = start_state
end
attr_accessor :start_state
def states
[start_state, start_state.substates].flatten
end
def final_states
states.select { |s| s.final? }
end
def matches_string?(str)
matches_array? str.split("")
end
alias_method :matches?, :matches_string?
def matches_array?(array)
state_table.matches?(array)
end
def state_table
- returning TransitionTable.new do |table|
+ returning NfaTransitionTable.new do |table|
table.start_state = start_state
start_state.add_transitions_to_table(table)
end
end
def deep_clone
returning clone do |c|
c.start_state = c.start_state.deep_clone
end
end
end
end
end
diff --git a/spec/hopcoft/machine/transition_table_spec.rb b/spec/hopcoft/machine/nfa_transition_table_spec.rb
similarity index 96%
rename from spec/hopcoft/machine/transition_table_spec.rb
rename to spec/hopcoft/machine/nfa_transition_table_spec.rb
index 275c453..3e73c2a 100644
--- a/spec/hopcoft/machine/transition_table_spec.rb
+++ b/spec/hopcoft/machine/nfa_transition_table_spec.rb
@@ -1,249 +1,249 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Machine
- describe TransitionTable do
+ describe NfaTransitionTable do
describe "adding a state change" do
before do
- @table = TransitionTable.new
+ @table = NfaTransitionTable.new
end
it "should create a two dimensional entry, with [from_state][transition] = [to_state]" do
from = mock(State, :start_state? => false)
to = mock(State, :start_state? => false)
@table.add_state_change(from, to, :a)
@table.targets_for(from, :a).should == [to]
end
it "should be able to use strings when finding a start state" do
from = mock State, :start_state? => true, :final? => false
to = mock State, :start_state? => false, :final? => true
@table.add_state_change(from, to, :a)
@table.start_state = from
@table.matches?("a").should be_true
end
it "should be able to use multiple transitions from the same state" do
from = mock(State, :start_state? => false)
first_result = mock(State, :start_state? => false)
second_result = mock(State, :start_state? => false)
@table.start_state = from
@table.add_state_change(from, first_result, :a)
@table.add_state_change(from, second_result, :b)
@table.targets_for(from, :a).should == [first_result]
@table.targets_for(from, :b).should == [second_result]
end
it "should be able to use the same transition symbol to different states (for an NFA)" do
from = mock(State, :start_state? => false)
first_result = mock(State, :start_state? => false)
second_result = mock(State, :start_state? => false)
@table.add_state_change(from, first_result, :a)
@table.add_state_change(from, second_result, :a)
@table.targets_for(from, :a).should == [first_result, second_result]
end
it "should have a transition for an 'any' transition" do
from = State.new :start_state => true
to = from.add_transition :any => true
transition = from.transitions.first.symbol
@table.add_state_change from, to, transition
@table.targets_for(from, :a).should == [to]
end
end
describe "targets_for" do
before do
- @table = TransitionTable.new
+ @table = NfaTransitionTable.new
@state = mock(State, :start_state? => false, :final? => false)
@transition = :foo
end
it "should reutrn an empty array if it indexes the state, but no transitions for that state" do
@table.add_state_change(@state, @state, :foo)
@table.targets_for(@state, :bar).should == []
end
it "should return an empty array if it does not index the state" do
@table.targets_for(@state, :foo).should == []
end
end
describe "matching a symbol" do
before do
- @table = TransitionTable.new
+ @table = NfaTransitionTable.new
end
it "should match if one symbol in the table, and the symbol is given" do
start_state = mock(State, :final? => false, :start_state? => true)
final_state = mock(State, :final? => true, :start_state? => false)
@table.start_state = start_state
@table.add_state_change(start_state, final_state, :foo)
@table.matches?([:foo]).should be_true
end
it "should not match when it cannot index the transition" do
start_state = mock(State, :final? => false, :start_state? => true)
final_state = mock(State, :final? => true, :start_state? => false)
@table.start_state = start_state
@table.add_state_change(start_state, final_state, :foo)
@table.matches?([:bar]).should be_false
end
it "should not match if the last state in the input is not a final state" do
start_state = mock(State, :final? => false, :start_state? => true)
final_state = mock(State, :final? => false, :start_state? => false)
@table.start_state = start_state
@table.add_state_change(start_state, final_state, :foo)
@table.matches?([:foo]).should be_false
end
it "should raise an error if there is no start state" do
lambda {
@table.matches?([:foo])
- }.should raise_error(TransitionTable::MissingStartState)
+ }.should raise_error(NfaTransitionTable::MissingStartState)
end
it "should match when following two symbols" do
start_state = mock(State, :final? => false, :start_state? => true)
state_one = mock(State, :final? => false, :start_state? => false)
state_two = mock(State, :final? => true, :start_state? => false)
@table.start_state = start_state
@table.add_state_change start_state, state_one, :one
@table.add_state_change state_one, state_two, :two
@table.matches?([:one, :two]).should be_true
end
it "should not match when following two symbols, and the last is not a final state" do
start_state = mock(State, :final? => false, :start_state? => true)
state_one = mock(State, :final? => false, :start_state? => false)
state_two = mock(State, :final? => false, :start_state? => false)
@table.start_state = start_state
@table.add_state_change start_state, state_one, :one
@table.add_state_change state_one, state_two, :two
@table.matches?([:one, :two]).should be_false
end
it "should match a NFA, where a start state leads to one of two possible final states" do
start_state = mock(State, :final? => false, :start_state? => true)
state_one = mock(State, :final? => false, :start_state? => false)
state_two = mock(State, :final? => true, :start_state? => false)
@table.start_state = start_state
@table.add_state_change start_state, state_one, :one
@table.add_state_change start_state, state_two, :one
@table.matches?([:one]).should be_true
end
it "should not match when the one state does not transition to the other" do
start_state = mock(State, :final? => false, :start_state? => true)
state_one = mock(State, :final? => false, :start_state? => false)
state_two = mock(State, :final? => true, :start_state? => false)
@table.start_state = start_state
@table.add_state_change start_state, state_one, :one
@table.add_state_change start_state, state_two, :two
@table.matches?([:one, :two]).should be_false
end
it "should not consume any chars under an epsilon transition" do
start_state = mock(State, :final? => false, :start_state? => true)
state_two = mock(State, :final? => true, :start_state? => false)
@table.start_state = start_state
@table.add_state_change start_state, state_two, EpsilonTransition
@table.matches?([]).should be_true
end
end
describe "inspect" do
before do
- @table = TransitionTable.new
+ @table = NfaTransitionTable.new
@displayer = mock TableDisplayer
end
it "should output a state table" do
TableDisplayer.should_receive(:new).with(@table).and_return @displayer
@displayer.should_receive(:to_s)
@table.inspect
end
it "should display 'Empty table' when empty" do
@table.inspect.should == "\nEmpty table"
end
it "should be able to display a start state with no transitions" do
start_state = State.new(:start_state => true, :name => "Foo")
@table.start_state = start_state
@table.inspect.should include("Foo")
end
end
describe "to_hash" do
it "should return a hash" do
- TransitionTable.new.to_hash.class.should == Hash
+ NfaTransitionTable.new.to_hash.class.should == Hash
end
end
describe "initial states" do
describe "for a start_state to an epsilon transition" do
# +--------------+--------------------------------------+-------------+
# | | Hopcroft::Machine::EpsilonTransition | a |
# +--------------+--------------------------------------+-------------+
# | -> State 207 | State 208 | |
# | State 208 | | * State 209 |
# +--------------+--------------------------------------+-------------+
before do
@state1 = State.new :start_state => true, :name => "State 1"
@state2 = State.new :start_state => false, :name => "State 2"
@state3 = State.new :start_state => false, :name => "State 3", :final_state => true
- @table = TransitionTable.new
+ @table = NfaTransitionTable.new
@table.add_state_change @state1, @state2, EpsilonTransition
@table.add_state_change @state2, @state3, :a
@table.start_state = @state1
end
it "should have state 1 as an initial state (it is a start state)" do
@table.initial_states.should include(@state1)
end
it "should have state 2 as an initial state (it has an epsilon transition from the start state)" do
@table.initial_states.should include(@state2)
end
it "should not have state 3 as an initial state" do
@table.initial_states.should_not include(@state3)
end
end
end
end
end
end
diff --git a/spec/hopcoft/machine/state_spec.rb b/spec/hopcoft/machine/state_spec.rb
index 77bf741..ac9ae81 100644
--- a/spec/hopcoft/machine/state_spec.rb
+++ b/spec/hopcoft/machine/state_spec.rb
@@ -1,277 +1,277 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Machine
describe State do
it "should set the start state on the first state to a start state" do
state = State.new
state.should be_a_start_state
end
it "should have no transitions to begin with" do
s = State.new
s.transitions.should == []
end
it "should be able to add transitions" do
s = State.new
s.add_transition :symbol => :foo
s.transitions.size.should == 1
end
it "should be a start state" do
s = State.new
s.should be_a_start_state
end
it "should have start state assigned" do
s = State.new
s.start_state = false
s.should_not be_a_start_state
end
it "should not be a final state by default" do
s = State.new
s.should_not be_a_final_state
s.should_not be_final
end
it "should have the final state as assignable" do
s = State.new
s.final_state = true
s.should be_a_final_state
s.should be_final
end
describe "transitions" do
before do
@state = State.new
end
it "should create a transition when calling add_transition" do
@state.add_transition :symbol => :foo
@state.transitions.first.should be_a_kind_of(Transition)
end
it "should pass on the symbol to the transition" do
@state.add_transition :symbol => :baz
transition = @state.transitions.first
transition.symbol.should == :baz
end
it "should construct a new state when none provided" do
@state.add_transition :symbol => :foo
transition = @state.transitions.first
transition.state.should be_a_kind_of(State)
end
it "should not have the new state as the start state" do
@state.add_transition :symbol => :foo
transition = @state.transitions.first
transition.state.should_not be_a_start_state
end
it "should be able to mark the new state as a final state" do
@state.add_transition :symbol => :foo, :final => true
transition = @state.transitions.first
transition.state.should be_a_final_state
end
it "should take another state as the transition target" do
state = mock('state', :null_object => true)
@state.add_transition :symbol => :foo, :state => state
transition = @state.transitions.first
transition.state.should == state
end
it "should be able to add transitions recursively" do
s1 = State.new
s2 = State.new
s1.add_transition :state => s2, :epsilon => true
s2.add_transition :state => s1, :epsilon => true
- table = TransitionTable.new
+ table = NfaTransitionTable.new
s1.add_transitions_to_table(table)
end
describe "passed :machine => m" do
before do
@state = State.new
@machine = StateMachine.new
end
it "should add a transition to another state machines first state" do
other_machine_start_state = @machine.start_state
@state.add_transition :machine => @machine
@state.transitions.first.state.should == other_machine_start_state
end
it "should add the transition as an epsilon transition" do
@state.add_transition :machine => @machine
@state.transitions.first.should be_a_kind_of(EpsilonTransition)
end
it "should no longer have the other machines start state as a start state in this machine" do
other_machine_start_state = @machine.start_state
@state.add_transition :machine => @machine
@state.transitions.first.state.should_not be_a_start_state
end
end
end
describe "name" do
it "should take a name param" do
state = State.new(:name => "foo")
state.name.should == "foo"
end
it "should auto-assign a state #" do
State.reset_counter!
state = State.new
state.name.should == "State 1"
end
it "should assign 'State 2' for the second state created" do
State.reset_counter!
State.new
state2 = State.new
state2.name.should == "State 2"
end
end
describe "to_s" do
it "should be aliased to the name" do
s = State.new
s.method(:name).should == s.method(:to_s)
end
end
describe "inspect" do
it "should display the name" do
s = State.new(:name => "State 1")
s.inspect.should include("State 1")
end
it "should show start state, final state, etc." do
s = State.new(:name => "State 1", :start_state => true, :final => true)
s.inspect.should == "State 1 {start: true, final: true, transitions: 0}"
end
it "should display the correct value for the start state" do
s = State.new(:name => "State 1", :start_state => false, :final => true)
s.inspect.should == "State 1 {start: false, final: true, transitions: 0}"
end
it "should display the correct value for the final state" do
s = State.new(:name => "State 1", :start_state => true, :final => false)
s.inspect.should == "State 1 {start: true, final: false, transitions: 0}"
end
it "should display 1 transition" do
s = State.new(:name => "State 1", :start_state => true, :final => true)
s.add_transition
s.inspect.should == "State 1 {start: true, final: true, transitions: 1}"
end
end
describe "deep_clone" do
before do
@state = State.new
end
it "should be of class State" do
clone = @state.deep_clone
clone.should be_a_kind_of(State)
end
it "should be a new instance" do
clone = @state.deep_clone
clone.should_not equal(@state)
end
it "should be a final state if the original was a final state" do
@state.final_state = true
clone = @state.deep_clone
clone.should be_a_final_state
end
it "should not have the same transition objects" do
@state.add_transition
transition = @state.transitions.first
clone = @state.deep_clone
clone.transitions.first.should_not equal(transition)
end
it "should have one transition if the original had one transition" do
@state.add_transition
clone = @state.deep_clone
clone.transitions.size.should == 1
end
it "should have two transitions if the original had two transition" do
@state.add_transition
@state.add_transition
clone = @state.deep_clone
clone.transitions.size.should == 2
end
it "should have a transition as a Transition object" do
@state.add_transition
clone = @state.deep_clone
clone.transitions.first.should be_a_kind_of(Transition)
end
it "should call deep_clone on the transitions" do
@state.add_transition
@state.transitions.first.should_receive(:deep_clone)
@state.deep_clone
end
end
describe "substates" do
before do
@state = State.new
end
it "should have none with no transitions" do
@state.substates.should == []
end
it "should have a state which is linked to by a transition" do
new_state = @state.add_transition :symbol => :foo
@state.substates.should == [new_state]
end
it "should have multiple states" do
one = @state.add_transition :symbol => :foo
two = @state.add_transition :symbol => :foo
@state.substates.should == [one, two]
end
it "should show states of the states (should find the states substates recursively)" do
substate = @state.add_transition :symbol => :foo
sub_substate = substate.add_transition :symbol => :foo
@state.substates.should == [substate, sub_substate]
end
end
end
end
end
diff --git a/spec/hopcoft/machine/transition_table/targets_for_spec.rb b/spec/hopcoft/machine/transition_table/targets_for_spec.rb
index 8b4364e..eb6e84e 100644
--- a/spec/hopcoft/machine/transition_table/targets_for_spec.rb
+++ b/spec/hopcoft/machine/transition_table/targets_for_spec.rb
@@ -1,115 +1,115 @@
require File.expand_path(File.dirname(__FILE__) + "/../../../spec_helper")
module Hopcroft
module Machine
- describe TransitionTable do
+ describe NfaTransitionTable do
describe "new_transitions_for" do
before do
- @table = TransitionTable.new
+ @table = NfaTransitionTable.new
@state1 = State.new
@state2 = State.new(:start_state => false)
@state3 = State.new(:start_state => false)
@state4 = State.new(:start_state => false)
@state5 = State.new(:start_state => false)
end
it "should return a transition under a symbol" do
@table.add_state_change @state1, @state2, :a
@table.targets_for(@state1, :a).should == [@state2]
end
it "should use the correct sym" do
@table.add_state_change @state1, @state2, :b
@table.targets_for(@state1, :b).should == [@state2]
end
it "should only find states matching the sym" do
@table.add_state_change @state1, @state2, :a
@table.add_state_change @state1, @state3, :b
@table.targets_for(@state1, :a).should == [@state2]
end
it "should return multiple transitions under the same sym" do
@table.add_state_change @state1, @state2, :a
@table.add_state_change @state1, @state3, :a
@table.targets_for(@state1, :a).should == [@state2, @state3]
end
it "should return an empty array if it cannot find the sym" do
@table.add_state_change @state1, @state2, :a
@table.targets_for(@state1, :b).should == []
end
it "should return an empty array if it cannot find the state" do
@table.add_state_change @state1, @state2, :a
@table.targets_for(mock('a state'), :a).should == []
end
it "should find an epsilon transition *after* a match" do
@table.add_state_change @state1, @state2, :a
@table.add_state_change @state2, @state3, EpsilonTransition
@table.targets_for(@state1, :a).should == [@state2, @state3]
end
it "should find multiple epsilon transitions" do
@table.add_state_change @state1, @state2, :a
@table.add_state_change @state2, @state3, EpsilonTransition
@table.add_state_change @state2, @state4, EpsilonTransition
@table.targets_for(@state1, :a).should == [@state2, @state3, @state4]
end
it "should follow epsilon transitions following other epsilon transitions *after* a match" do
@table.add_state_change @state1, @state2, :a
@table.add_state_change @state2, @state3, EpsilonTransition
@table.add_state_change @state3, @state4, EpsilonTransition
@table.targets_for(@state1, :a).should == [@state2, @state3, @state4]
end
it "should not follow a sym after matching the sym" do
@table.add_state_change @state1, @state2, :a
@table.add_state_change @state2, @state3, :a
@table.targets_for(@state1, :a).should == [@state2]
end
it "should not follow a sym after matching a sym when epsilon transitions connect the syms" do
@table.add_state_change @state1, @state2, :a
@table.add_state_change @state2, @state3, EpsilonTransition
@table.add_state_change @state3, @state4, :a
@table.targets_for(@state1, :a).should == [@state2, @state3]
end
it "should not find other (non-epsilon) transitions after a match" do
@table.add_state_change @state1, @state2, :a
@table.add_state_change @state2, @state3, :a
@table.add_state_change @state2, @state3, EpsilonTransition
@table.add_state_change @state3, @state4, :a
@table.targets_for(@state1, :a).should == [@state2, @state3]
end
it "should match a char under an AnyCharTransition" do
@table.add_state_change @state1, @state2, AnyCharTransition
@table.targets_for(@state1, :a).should == [@state2]
end
it "should match any char" do
@table.add_state_change @state1, @state2, AnyCharTransition
@table.targets_for(@state1, :b).should == [@state2]
end
end
end
end
end
|
smtlaissezfaire/hopcroft
|
5127e70d7fdf56bab248de18311f78ffd60195eb
|
Remove pending specs which now work
|
diff --git a/spec/hopcoft/regex/kleen_star_spec.rb b/spec/hopcoft/regex/kleen_star_spec.rb
index 2c28b2b..c43de79 100644
--- a/spec/hopcoft/regex/kleen_star_spec.rb
+++ b/spec/hopcoft/regex/kleen_star_spec.rb
@@ -1,81 +1,79 @@
require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
module Hopcroft
module Regex
describe KleenStar do
it "should take a regex" do
s = KleenStar.new(Char.new("f"))
s.expression.should == Char.new("f")
end
describe "matching" do
def new_kleen_star_with_string(str)
KleenStar.new(Char.new(str))
end
it "should match 0 chars" do
s = new_kleen_star_with_string("a")
s.matches?("").should be_true
end
it "should match one char" do
s = new_kleen_star_with_string("a")
s.matches?("a").should be_true
end
it "should NOT match a different char" do
s = new_kleen_star_with_string("a")
s.matches?("b").should be_false
end
it "should match many of the same chars" do
s = new_kleen_star_with_string("a")
s.matches?("aa").should be_true
end
it "should match 10 chars" do
s = new_kleen_star_with_string("a")
s.matches?("aaaaaaaaaa").should be_true
end
it "should match 'aaaa' with '(a|b)*'" do
- pending 'TODO'
expr = Alternation.new(Char.new("a"), Char.new("b"))
s = KleenStar.new(expr)
s.matches?("aaaa").should be_true
end
it "should match 'bbbb' with '(a|b)*'" do
- pending 'TODO'
expr = Alternation.new(Char.new("a"), Char.new("b"))
s = KleenStar.new(expr)
s.matches?("bbbb").should be_true
end
end
it "should have the regex string" do
KleenStar.new(Char.new("a")).to_regex_s.should == "a*"
end
it "should be able to output the state table" do
star = KleenStar.new(Char.new("a"))
lambda {
star.to_machine.state_table.inspect
}.should_not raise_error
end
describe "==" do
it "should be true with subexpressions" do
one = KleenStar.new(CharacterClass.new("a-z"))
two = KleenStar.new(CharacterClass.new("a-z"))
one.should == two
two.should == one
end
end
end
end
end
|
smtlaissezfaire/hopcroft
|
e5e273e6e77a06a35c0f5c4949f0e65ad8bf44ae
|
State#substates now recursively traverses substates
|
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b90e3e3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+*.bundle
+*.o
+tmp/
+doc/
+TAGS
\ No newline at end of file
diff --git a/GPL_LICENSE b/GPL_LICENSE
new file mode 100644
index 0000000..94a9ed0
--- /dev/null
+++ b/GPL_LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/MIT_LICENSE b/MIT_LICENSE
new file mode 100644
index 0000000..f08f766
--- /dev/null
+++ b/MIT_LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2008 Scott Taylor (smtlaissezfaire) <[email protected]>
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/README.rdoc b/README.rdoc
new file mode 100644
index 0000000..c366f92
--- /dev/null
+++ b/README.rdoc
@@ -0,0 +1,4 @@
+= Hopcroft
+
+A library for dealing with regular languages: Regexes and State Machines.
+
diff --git a/Rakefile b/Rakefile
new file mode 100644
index 0000000..6e7d314
--- /dev/null
+++ b/Rakefile
@@ -0,0 +1,6 @@
+
+Dir.glob(File.dirname(__FILE__) + "/tasks/**/**").each do |file|
+ load file
+end
+
+task :default => :spec
\ No newline at end of file
diff --git a/lib/hopcroft.rb b/lib/hopcroft.rb
new file mode 100644
index 0000000..1b0b77c
--- /dev/null
+++ b/lib/hopcroft.rb
@@ -0,0 +1,9 @@
+require "using"
+require "facets/kernel/returning"
+
+module Hopcroft
+ extend Using
+
+ using :Regex
+ using :Machine
+end
diff --git a/lib/hopcroft/machine.rb b/lib/hopcroft/machine.rb
new file mode 100644
index 0000000..c1529e7
--- /dev/null
+++ b/lib/hopcroft/machine.rb
@@ -0,0 +1,14 @@
+module Hopcroft
+ module Machine
+ extend Using
+
+ using :TransitionTable
+ using :State
+ using :Transition
+ using :StateMachine
+ using :EpsilonTransition
+ using :AnyCharTransition
+ using :TableConverter
+ using :TableDisplayer
+ end
+end
diff --git a/lib/hopcroft/machine/any_char_transition.rb b/lib/hopcroft/machine/any_char_transition.rb
new file mode 100644
index 0000000..0facd45
--- /dev/null
+++ b/lib/hopcroft/machine/any_char_transition.rb
@@ -0,0 +1,9 @@
+module Hopcroft
+ module Machine
+ class AnyCharTransition < Transition
+ def initialize(to)
+ super(self.class, to)
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/machine/epsilon_transition.rb b/lib/hopcroft/machine/epsilon_transition.rb
new file mode 100644
index 0000000..61dbc5c
--- /dev/null
+++ b/lib/hopcroft/machine/epsilon_transition.rb
@@ -0,0 +1,9 @@
+module Hopcroft
+ module Machine
+ class EpsilonTransition < Transition
+ def initialize(to)
+ super(self.class, to)
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/machine/state.rb b/lib/hopcroft/machine/state.rb
new file mode 100644
index 0000000..4184a43
--- /dev/null
+++ b/lib/hopcroft/machine/state.rb
@@ -0,0 +1,122 @@
+module Hopcroft
+ module Machine
+ class State
+ class << self
+ def reset_counter!
+ @counter = 1
+ end
+
+ def next_counter
+ returning counter do |c|
+ @counter += 1
+ end
+ end
+
+ def counter
+ @counter ||= 1
+ end
+ end
+
+ def initialize(options={})
+ @start_state = options[:start_state] if options.has_key?(:start_state)
+ @final_state = options[:final] if options.has_key?(:final)
+ assign_name(options)
+ end
+
+ attr_reader :name
+ alias_method :to_s, :name
+
+ def inspect
+ "#{name} {start: #{start_state?}, final: #{final_state?}, transitions: #{transitions.size}}"
+ end
+
+ def transitions
+ @transitions ||= []
+ end
+
+ attr_writer :transitions
+
+ # Accepts the following hash arguments:
+ #
+ # :machine => m (optional). Links current state to start state of machine
+ # given with an epsilon transition.
+ # :start_state => true | false. Make the state a start state. Defaults to false
+ # :final => true | false. Make the state a final state. Defaults to false
+ # :state => a_state (if none passed, a new one is constructed)
+ # :symbol => Symbol to transition to.
+ # :epsilon => An Epsilon Transition instead of a regular symbol transition
+ # :any => An any symbol transition. Equivalent to a regex '.'
+ #
+ def add_transition(args={})
+ args[:start_state] = false unless args.has_key?(:start_state)
+
+ if args[:machine]
+ machine = args[:machine]
+
+ args[:state] = machine.start_state
+ args[:state].start_state = false
+ args[:epsilon] = true
+ else
+ args[:state] ||= State.new(args)
+ end
+
+ returning args[:state] do |state|
+ transitions << transition_for(args, state)
+ yield(state) if block_given?
+ state
+ end
+ end
+
+ def transition_for(args, state)
+ if args[:epsilon]
+ EpsilonTransition.new(state)
+ elsif args[:any]
+ AnyCharTransition.new(state)
+ else
+ Transition.new(args[:symbol], state)
+ end
+ end
+
+ def start_state?
+ @start_state.equal?(false) ? false : true
+ end
+
+ attr_writer :start_state
+
+ def final_state?
+ @final_state ? true : false
+ end
+
+ alias_method :final?, :final_state?
+
+ attr_writer :final_state
+
+ def substates
+ transitions.map { |t| [t.state, t.state.substates] }.flatten
+ end
+
+ def add_transitions_to_table(table)
+ transitions.each do |transition|
+ to = transition.to
+
+ unless table.has_state_change?(self, to, transition.symbol)
+ table.add_state_change(self, to, transition.symbol)
+ transition.to.add_transitions_to_table(table)
+ end
+ end
+ end
+
+ def deep_clone
+ returning clone do |c|
+ c.transitions = transitions.map { |t| t.deep_clone }
+ end
+ end
+
+ private
+
+ def assign_name(options)
+ @name = options[:name] ? options[:name] : "State #{self.class.next_counter}"
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/machine/state_machine.rb b/lib/hopcroft/machine/state_machine.rb
new file mode 100644
index 0000000..7bda36d
--- /dev/null
+++ b/lib/hopcroft/machine/state_machine.rb
@@ -0,0 +1,42 @@
+module Hopcroft
+ module Machine
+ class StateMachine
+ def initialize(start_state = State.new)
+ @start_state = start_state
+ end
+
+ attr_accessor :start_state
+
+ def states
+ [start_state, start_state.substates].flatten
+ end
+
+ def final_states
+ states.select { |s| s.final? }
+ end
+
+ def matches_string?(str)
+ matches_array? str.split("")
+ end
+
+ alias_method :matches?, :matches_string?
+
+ def matches_array?(array)
+ state_table.matches?(array)
+ end
+
+ def state_table
+ returning TransitionTable.new do |table|
+ table.start_state = start_state
+ start_state.add_transitions_to_table(table)
+ end
+ end
+
+ def deep_clone
+ returning clone do |c|
+ c.start_state = c.start_state.deep_clone
+ end
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/machine/table_converter.rb b/lib/hopcroft/machine/table_converter.rb
new file mode 100644
index 0000000..11e33a6
--- /dev/null
+++ b/lib/hopcroft/machine/table_converter.rb
@@ -0,0 +1,42 @@
+module Hopcroft
+ module Machine
+ class TableConverter
+ EMPTY_SET_SYMBOL = []
+
+ def initialize(hash)
+ @hash = hash
+ end
+
+ def transition_symbols
+ @transition_symbols ||= @hash.values.map { |v| v.keys }.flatten.uniq
+ end
+
+ def primary_states
+ @primary_states ||= @hash.keys.dup
+ end
+
+ def header
+ ["", *transition_symbols]
+ end
+
+ def body
+ primary_states.map do |state|
+ [state, *values_from(state)]
+ end
+ end
+
+ def to_a
+ @hash.empty? ? [] : [header, body]
+ end
+
+ private
+
+ def values_from(state)
+ transition_symbols.map do |transition|
+ val = @hash[state][transition]
+ val ? val : EMPTY_SET_SYMBOL
+ end
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/machine/table_displayer.rb b/lib/hopcroft/machine/table_displayer.rb
new file mode 100644
index 0000000..f52c205
--- /dev/null
+++ b/lib/hopcroft/machine/table_displayer.rb
@@ -0,0 +1,67 @@
+require "terminal-table"
+require "facets/enumerable/map_with_index"
+
+module Hopcroft
+ module Machine
+ class TableDisplayer
+ NEWLINE = "\n"
+ EMPTY_TABLE_MESSAGE = "Empty table"
+
+ def initialize(state_table_hash)
+ @state_hash = state_table_hash
+ end
+
+ def to_a
+ [header, body]
+ end
+
+ def header
+ converted_table.header.map { |col| col.to_s }
+ end
+
+ def body
+ converted_table.body.map do |row|
+ row.map_with_index do |entry, index|
+ if index == 0
+ text = decorate_start_state(entry)
+ decorate_final_state(entry, text)
+ else
+ entry.map { |state| decorate_final_state(state) }.join(", ")
+ end
+ end
+ end
+ end
+
+ def to_s
+ returning String.new do |s|
+ s << NEWLINE
+ s << table
+ end
+ end
+
+ include Terminal::Table::TableHelper
+
+ def table
+ if @state_hash.empty?
+ EMPTY_TABLE_MESSAGE
+ else
+ super(header, *body).to_s
+ end
+ end
+
+ private
+
+ def decorate_final_state(state, text = state.name)
+ state.final? ? "* #{text}" : text
+ end
+
+ def decorate_start_state(state)
+ state.start_state? ? "-> #{state.name}" : state.name
+ end
+
+ def converted_table
+ @table ||= TableConverter.new(@state_hash)
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/machine/transition.rb b/lib/hopcroft/machine/transition.rb
new file mode 100644
index 0000000..3894afc
--- /dev/null
+++ b/lib/hopcroft/machine/transition.rb
@@ -0,0 +1,18 @@
+module Hopcroft
+ module Machine
+ class Transition
+ def initialize(symbol, state)
+ @symbol = symbol.respond_to?(:to_sym) ? symbol.to_sym : symbol
+ @state = state
+ end
+
+ attr_reader :symbol
+ attr_reader :state
+ alias_method :to, :state
+
+ def deep_clone
+ self.class.new(symbol, state.deep_clone)
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/machine/transition_table.rb b/lib/hopcroft/machine/transition_table.rb
new file mode 100644
index 0000000..615862b
--- /dev/null
+++ b/lib/hopcroft/machine/transition_table.rb
@@ -0,0 +1,98 @@
+module Hopcroft
+ module Machine
+ class TransitionTable < Hash
+ class MissingStartState < StandardError; end
+
+ attr_reader :start_state
+
+ def start_state=(start_state)
+ self[start_state] ||= {}
+ @start_state = start_state
+ end
+
+ # Create a transition without marking appropriate start states
+ def add_state_change(from_state, to_state, transition_symbol)
+ sym = transition_symbol
+
+ self[from_state] ||= {}
+ self[from_state][sym] ||= []
+ self[from_state][sym] << to_state
+ end
+
+ def has_state_change?(from_state, to_state, transition_symbol)
+ self[from_state] &&
+ self[from_state][transition_symbol] &&
+ self[from_state][transition_symbol].include?(to_state)
+ end
+
+ def targets_for(state, transition_sym)
+ find_targets_matching(state, transition_sym) do |target|
+ epsilon_states_following(target)
+ end
+ end
+
+ def initial_states
+ [start_state] + epsilon_states_following(start_state)
+ end
+
+ def next_transitions(states, sym)
+ states.map { |s| targets_for(s, sym) }.compact.flatten
+ end
+
+ def matches?(input_array, current_states = initial_states)
+ raise MissingStartState unless start_state
+
+ input_array.each do |sym|
+ current_states = next_transitions(current_states, sym.to_sym)
+ end
+
+ current_states.any? { |state| state.final? }
+ end
+
+ def inspect
+ TableDisplayer.new(self).to_s
+ end
+
+ def to_hash
+ Hash.new(self)
+ end
+
+ private
+
+ def epsilon_states_following(state)
+ find_targets_matching(state, EpsilonTransition) do |target|
+ epsilon_states_following(target)
+ end
+ end
+
+ def find_targets_matching(state, transition_sym, &recursion_block)
+ returning Array.new do |a|
+ direct_targets = find_targets_for(state, transition_sym)
+ append a, direct_targets
+
+ direct_targets.each do |target|
+ append a, recursion_block.call(target)
+ end
+ end
+ end
+
+ def find_targets_for(state, transition_sym)
+ returning Array.new do |a|
+ if state = self[state]
+ if state[transition_sym]
+ append a, state[transition_sym]
+ end
+
+ if state[AnyCharTransition] && transition_sym != EpsilonTransition
+ append a, state[AnyCharTransition]
+ end
+ end
+ end
+ end
+
+ def append(array1, array2)
+ array1.push *array2
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/regex.rb b/lib/hopcroft/regex.rb
new file mode 100644
index 0000000..185ef01
--- /dev/null
+++ b/lib/hopcroft/regex.rb
@@ -0,0 +1,34 @@
+require "treetop"
+
+module Hopcroft
+ module Regex
+ SPECIAL_CHARS = [
+ DOT = ".",
+ PLUS = "+",
+ QUESTION = "?",
+ STAR = "*",
+ OPEN_BRACKET = "[",
+ CLOSE_BRACKET = "]",
+ ESCAPE_CHAR = "\\",
+ ALTERNATION = "|"
+ ]
+
+ extend Using
+
+ using :Base
+ using :Char
+ using :KleenStar
+ using :Plus
+ using :Dot
+ using :CharacterClass
+ using :OptionalSymbol
+ using :Concatenation
+ using :Alternation
+
+ using :Parser
+
+ def self.compile(from_string)
+ Parser.parse(from_string)
+ end
+ end
+end
diff --git a/lib/hopcroft/regex/alternation.rb b/lib/hopcroft/regex/alternation.rb
new file mode 100644
index 0000000..0e7a536
--- /dev/null
+++ b/lib/hopcroft/regex/alternation.rb
@@ -0,0 +1,27 @@
+module Hopcroft
+ module Regex
+ class Alternation < Base
+ def initialize(*expressions)
+ @expressions = expressions
+ end
+
+ attr_reader :expressions
+
+ def to_regex_s
+ regexs.join ALTERNATION
+ end
+
+ def build_machine(start)
+ @expressions.each do |expr|
+ start.add_transition :machine => expr.to_machine
+ end
+ end
+
+ private
+
+ def regexs
+ @expressions.map { |expression| expression.to_regex_s }
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/regex/base.rb b/lib/hopcroft/regex/base.rb
new file mode 100644
index 0000000..aa748de
--- /dev/null
+++ b/lib/hopcroft/regex/base.rb
@@ -0,0 +1,50 @@
+module Hopcroft
+ module Regex
+ class Base
+ def initialize(expr)
+ @expression = expr
+ end
+
+ attr_reader :expression
+
+ def ==(other)
+ other.respond_to?(:to_regex_s) &&
+ to_regex_s == other.to_regex_s
+ end
+
+ def matches?(str)
+ to_machine.matches? str
+ end
+
+ alias_method :matched_by?, :matches?
+
+ def +(other)
+ Concatenation.new(self, other)
+ end
+
+ def |(other)
+ Alternation.new(self, other)
+ end
+
+ def to_regexp
+ Regexp.new(to_regex_s)
+ end
+
+ alias_method :to_regex, :to_regexp
+
+ def to_machine
+ new_machine do |m, start_state|
+ build_machine(start_state)
+ end
+ end
+
+ private
+
+ def new_machine
+ returning Machine::StateMachine.new do |machine|
+ yield machine, machine.start_state if block_given?
+ end
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/regex/char.rb b/lib/hopcroft/regex/char.rb
new file mode 100644
index 0000000..8260c71
--- /dev/null
+++ b/lib/hopcroft/regex/char.rb
@@ -0,0 +1,24 @@
+module Hopcroft
+ module Regex
+ class Char < Base
+ class InvalidInput < StandardError; end
+
+ def initialize(str)
+ raise InvalidInput if str.empty?
+ super
+ end
+
+ def to_regex_s
+ if SPECIAL_CHARS.include?(expression)
+ "#{ESCAPE_CHAR}#{expression}"
+ else
+ expression
+ end
+ end
+
+ def build_machine(start_state)
+ start_state.add_transition :symbol => expression, :final => true
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/regex/character_class.rb b/lib/hopcroft/regex/character_class.rb
new file mode 100644
index 0000000..bbf62a0
--- /dev/null
+++ b/lib/hopcroft/regex/character_class.rb
@@ -0,0 +1,54 @@
+module Hopcroft
+ module Regex
+ class CharacterClass < Base
+ class InvalidCharacterClass < StandardError; end
+
+ class << self
+ def new(str)
+ one_char_long?(str) ? Char.new(str) : super
+ end
+
+ private
+
+ def one_char_long?(str)
+ str.size == 1 || (str.size == 2 && str[0] == "\\"[0])
+ end
+ end
+
+ def initialize(str)
+ super
+ raise InvalidCharacterClass if invalid_expression?
+ end
+
+ def build_machine(start_state)
+ each_symbol do |sym|
+ start_state.add_transition :symbol => sym, :final => true
+ end
+ end
+
+ def each_symbol(&block)
+ symbols.each(&block)
+ end
+
+ def symbols
+ start, finish = expression.split("-")
+ Range.new(start, finish).to_a.map { |e| e.to_s }
+ end
+
+ def to_regex_s
+ "#{OPEN_BRACKET}#{expression}#{CLOSE_BRACKET}"
+ end
+
+ private
+
+ def valid_expression?
+ one, two = expression.split("-")
+ two > one
+ end
+
+ def invalid_expression?
+ !valid_expression?
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/regex/concatenation.rb b/lib/hopcroft/regex/concatenation.rb
new file mode 100644
index 0000000..2e2695f
--- /dev/null
+++ b/lib/hopcroft/regex/concatenation.rb
@@ -0,0 +1,38 @@
+require 'enumerator'
+
+module Hopcroft
+ module Regex
+ class Concatenation < Base
+ def initialize(*objs)
+ @array = objs
+ end
+
+ def to_regex_s
+ @array.map { |a| a.to_regex_s }.join("")
+ end
+
+ def to_a
+ @array
+ end
+
+ def to_machine
+ machines = components.dup
+
+ machines.each_cons(2) do |first, second|
+ first.final_states.each do |state|
+ state.add_transition :machine => second
+ state.final_state = false
+ end
+ end
+
+ machines.first
+ end
+
+ private
+
+ def components
+ @array.map { |a| a.to_machine }
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/regex/dot.rb b/lib/hopcroft/regex/dot.rb
new file mode 100644
index 0000000..3b5641e
--- /dev/null
+++ b/lib/hopcroft/regex/dot.rb
@@ -0,0 +1,15 @@
+module Hopcroft
+ module Regex
+ class Dot < Base
+ def initialize; end
+
+ def build_machine(start)
+ start.add_transition :any => true, :final => true
+ end
+
+ def to_regex_s
+ DOT
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/regex/kleen_star.rb b/lib/hopcroft/regex/kleen_star.rb
new file mode 100644
index 0000000..0290b2f
--- /dev/null
+++ b/lib/hopcroft/regex/kleen_star.rb
@@ -0,0 +1,20 @@
+module Hopcroft
+ module Regex
+ class KleenStar < Base
+ def build_machine(start)
+ other_machine = @expression.to_machine
+
+ start.final_state = true
+ start.add_transition :machine => other_machine
+
+ other_machine.final_states.each do |state|
+ state.add_transition :state => start, :epsilon => true
+ end
+ end
+
+ def to_regex_s
+ "#{expression.to_regex_s}#{STAR}"
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/regex/optional_symbol.rb b/lib/hopcroft/regex/optional_symbol.rb
new file mode 100644
index 0000000..59b7d73
--- /dev/null
+++ b/lib/hopcroft/regex/optional_symbol.rb
@@ -0,0 +1,16 @@
+module Hopcroft
+ module Regex
+ class OptionalSymbol < Base
+ def to_regex_s
+ "#{expression.to_regex_s}#{QUESTION}"
+ end
+
+ def build_machine(start)
+ start.final_state = true
+
+ machine = @expression.to_machine
+ start.add_transition :machine => machine
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/regex/parser.rb b/lib/hopcroft/regex/parser.rb
new file mode 100644
index 0000000..a3aae4a
--- /dev/null
+++ b/lib/hopcroft/regex/parser.rb
@@ -0,0 +1,30 @@
+require "treetop"
+Treetop.load File.dirname(__FILE__) + "/regex_parser"
+
+module Hopcroft
+ module Regex
+ class Parser
+ class ParseError < StandardError; end
+
+ def self.parse(str)
+ new.parse_and_eval(str)
+ end
+
+ def initialize
+ @parser = Regex::TreetopRegexParser.new
+ end
+
+ def parse(str)
+ @parser.parse(str)
+ end
+
+ def parse_and_eval(str)
+ if parse = parse(str)
+ parse.eval
+ else
+ raise ParseError, "could not parse the regex '#{str}'"
+ end
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/regex/plus.rb b/lib/hopcroft/regex/plus.rb
new file mode 100644
index 0000000..4e50fde
--- /dev/null
+++ b/lib/hopcroft/regex/plus.rb
@@ -0,0 +1,19 @@
+module Hopcroft
+ module Regex
+ class Plus < Base
+ def build_machine(start_state)
+ subexpression = @expression.to_machine
+
+ start_state.add_transition :machine => subexpression
+
+ subexpression.final_states.each do |state|
+ state.add_transition :machine => KleenStar.new(@expression).to_machine
+ end
+ end
+
+ def to_regex_s
+ "#{expression.to_regex_s}#{PLUS}"
+ end
+ end
+ end
+end
diff --git a/lib/hopcroft/regex/regex_parser.treetop b/lib/hopcroft/regex/regex_parser.treetop
new file mode 100644
index 0000000..367406d
--- /dev/null
+++ b/lib/hopcroft/regex/regex_parser.treetop
@@ -0,0 +1,155 @@
+module Hopcroft
+ module Regex
+ grammar TreetopRegex do
+ rule a_regex do
+ expr_with_parens / expr_without_parens
+ end
+
+ rule expr_with_parens do
+ "(" expr_with_parens ")" {
+ def eval
+ expr_with_parens.eval
+ end
+ }
+ /
+ expr_without_parens
+ end
+
+ rule expr_without_parens
+ multi_valued_expression
+ end
+
+ rule multi_valued_expression do
+ car:single_value_expression cdr:single_value_expression* {
+ def eval
+ if cdr.elements.any?
+ a = [car.eval, cdr.elements.map { |element| element.eval }]
+ a.flatten!
+ a.inject { |collection, element| collection + element }
+ else
+ car.eval
+ end
+ end
+ }
+ end
+
+ rule single_value_expression
+ escaped_char / expr_without_escaping
+ end
+
+ rule expr_without_escaping do
+ kleen_star / expr_without_kleen_star
+ end
+
+ rule expr_without_kleen_star do
+ alternation / expr_without_alternation
+ end
+
+ rule expr_without_alternation do
+ plus_expr / expr_without_plus
+ end
+
+ rule expr_without_plus do
+ optional_expr / expr_without_optional_expr
+ end
+
+ rule expr_without_optional_expr do
+ char_class / dot / unescaped_char
+ end
+
+ rule alternation do
+ one:expr_without_alternation "|" two:expr_without_alternation {
+ def eval
+ Alternation.new(one.eval, two.eval)
+ end
+ }
+ end
+
+ rule plus_expr do
+ expr_without_plus "+" {
+ def eval
+ Plus.new(expr_without_plus.eval)
+ end
+ }
+ end
+
+ rule optional_expr do
+ expr_without_optional_expr "?" {
+ def eval
+ OptionalSymbol.new(expr_without_optional_expr.eval)
+ end
+ }
+ end
+
+ rule char_class do
+ "[" char_class_expr "]" {
+ def eval
+ CharacterClass.new(char_class_expr.eval)
+ end
+ }
+ end
+
+ rule char_class_expr do
+ one:char "-" two:char {
+ def eval
+ "#{one.text_value}-#{two.text_value}"
+ end
+ }
+ /
+ char {
+ def eval
+ text_value
+ end
+ }
+ end
+
+ rule kleen_star do
+ expr_without_kleen_star "*" {
+ def eval
+ KleenStar.new(expr_without_kleen_star.eval)
+ end
+ }
+ end
+
+ rule dot do
+ "." {
+ def eval
+ Dot.new
+ end
+ }
+ end
+
+ rule char do
+ unescaped_char / escaped_char
+ end
+
+ rule unescaped_char do
+ non_special_char {
+ def eval
+ Char.new(text_value)
+ end
+ }
+ end
+
+ rule escaped_char do
+ escape_char any_char {
+ def eval
+ Char.new(any_char.text_value)
+ end
+ }
+ end
+
+ rule non_special_char do
+ !("(" / ")" / "[" / "+" / "?" / "+" / "]" / "|" / "*" / "\\") .
+ end
+
+ rule any_char do
+ .
+ end
+
+ rule escape_char do
+ "\\"
+ end
+ end
+ end
+end
diff --git a/script/boot.rb b/script/boot.rb
new file mode 100644
index 0000000..6fdba13
--- /dev/null
+++ b/script/boot.rb
@@ -0,0 +1,12 @@
+
+def load_hopcroft!
+ require "rubygems"
+ reload!
+ include Hopcroft
+end
+
+def reload!
+ load "lib/hopcroft.rb"
+end
+
+load_hopcroft!
diff --git a/spec/hopcoft/integration_spec.rb b/spec/hopcoft/integration_spec.rb
new file mode 100644
index 0000000..986f24d
--- /dev/null
+++ b/spec/hopcoft/integration_spec.rb
@@ -0,0 +1,126 @@
+require File.expand_path(File.dirname(__FILE__) + "/../spec_helper")
+
+module Hopcroft
+ describe "Integration tests" do
+ describe "the regex /a/" do
+ before do
+ @regex = Regex.compile("a")
+ end
+
+ it "should match 'a'" do
+ @regex.should be_matched_by("a")
+ end
+
+ it "should not match 'b'" do
+ @regex.should_not be_matched_by("b")
+ end
+
+ it "should not match 'abasdfasdf'" do
+ @regex.should_not be_matched_by('abasdfasdf')
+ end
+ end
+
+ describe "the regex /ab/" do
+ before do
+ @regex = Regex.compile("ab")
+ end
+
+ it "should match 'ab'" do
+ @regex.should be_matched_by("ab")
+ end
+
+ it "should not match 'x'" do
+ @regex.should_not be_matched_by("x")
+ end
+
+ it "should not match 'ba'" do
+ @regex.should_not be_matched_by("ba")
+ end
+ end
+
+ describe "the regex /a*/" do
+ before do
+ @regex = Regex.compile("a*")
+ end
+
+ it "should be matched by 'a'" do
+ @regex.should be_matched_by("a")
+ end
+
+ it "should be matched by the empty string" do
+ @regex.should be_matched_by("")
+ end
+
+ it "should be matched by 'aa'" do
+ @regex.should be_matched_by("aa")
+ end
+
+ it "should be matched by 'aaa'" do
+ @regex.should be_matched_by("aaa")
+ end
+
+ it "should not be matched by 'aab'" do
+ @regex.should_not be_matched_by("aab")
+ end
+ end
+
+ describe "the regex /a+/" do
+ before do
+ @regex = Regex.compile("a+")
+ end
+
+ it "should be matched by 'a'" do
+ @regex.should be_matched_by("a")
+ end
+
+ it "should NOT be matched by the empty string" do
+ @regex.should_not be_matched_by("")
+ end
+
+ it "should be matched by 'aa'" do
+ @regex.should be_matched_by("aa")
+ end
+
+ it "should not be matched by 'aab'" do
+ @regex.should_not be_matched_by("aab")
+ end
+
+ it "should be matched by 'aaa'" do
+ @regex.matches?("aaa")
+ @regex.should be_matched_by("aaa")
+ end
+ end
+
+ describe "the regex /a|b/" do
+ before do
+ @regex = Regex.compile("a|b")
+ end
+
+ it "should be matched by an 'a'" do
+ @regex.should be_matched_by("a")
+ end
+
+ it "should be matched by a 'b'" do
+ @regex.should be_matched_by("b")
+ end
+
+ it "should not be matched by a 'c'" do
+ @regex.should_not be_matched_by("c")
+ end
+
+ it "should not be matched with the string 'ab'" do
+ @regex.matched_by?("ab")
+ @regex.should_not be_matched_by("ab")
+ end
+ end
+
+ describe "the regex /(a|b)+/" do
+ it "should not match the empty string"
+ it "should match an a"
+ it "should match 'b'"
+ it "should match 'aaa'"
+ it "should match 'bbb'"
+ it "should match 'ababababbbaaa'"
+ end
+ end
+end
diff --git a/spec/hopcoft/machine/any_character_transition_spec.rb b/spec/hopcoft/machine/any_character_transition_spec.rb
new file mode 100644
index 0000000..fa28ce5
--- /dev/null
+++ b/spec/hopcoft/machine/any_character_transition_spec.rb
@@ -0,0 +1,24 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Machine
+ describe AnyCharTransition do
+ before do
+ @to = mock(State)
+ @transition = AnyCharTransition.new(@to)
+ end
+
+ it "should be a kind_of? Transition" do
+ @transition.should be_a_kind_of(Transition)
+ end
+
+ it "should have a unique transition symbol (it should not be a symbol)" do
+ @transition.symbol.class.should_not == Symbol
+ end
+
+ it "should not have nil as the symbol" do
+ @transition.symbol.should_not be_nil
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/machine/epsilon_transition_spec.rb b/spec/hopcoft/machine/epsilon_transition_spec.rb
new file mode 100644
index 0000000..348fa18
--- /dev/null
+++ b/spec/hopcoft/machine/epsilon_transition_spec.rb
@@ -0,0 +1,24 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Machine
+ describe EpsilonTransition do
+ before do
+ @to = mock(State)
+ @transition = EpsilonTransition.new(@to)
+ end
+
+ it "should be a kind_of? Transition" do
+ @transition.should be_a_kind_of(Transition)
+ end
+
+ it "should have a unique transition symbol (it should not be a symbol)" do
+ @transition.symbol.class.should_not == Symbol
+ end
+
+ it "should not have nil as the symbol" do
+ @transition.symbol.should_not be_nil
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/machine/state_machine_spec.rb b/spec/hopcoft/machine/state_machine_spec.rb
new file mode 100644
index 0000000..709ebcf
--- /dev/null
+++ b/spec/hopcoft/machine/state_machine_spec.rb
@@ -0,0 +1,96 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Machine
+ describe StateMachine do
+ before do
+ @machine = StateMachine.new
+ end
+
+ it "should have a start state when beginning" do
+ @machine.start_state.should be_a_kind_of(State)
+ end
+
+ it "should be able to add a start state" do
+ state = State.new
+
+ @machine.start_state = state
+ @machine.start_state.should equal(state)
+ end
+
+ it "should accept a start state in the constructor" do
+ state = State.new
+ machine = StateMachine.new(state)
+ machine.start_state.should equal(state)
+ end
+
+ it "should be able to traverse a list of states" do
+ state = State.new
+ second_state = State.new
+ state.add_transition(:symbol => :foo, :state => second_state)
+
+ @machine.start_state = state
+ @machine.states.should == [state, second_state]
+ end
+
+ describe "building the transition table" do
+ before do
+ @machine = StateMachine.new
+ end
+
+ it "should match a transition of the start state to another state" do
+ start_state = @machine.start_state
+ second_state = start_state.add_transition :symbol => :foo
+
+ @machine.state_table.targets_for(start_state, :foo).should == [second_state]
+ end
+
+ it "should match multiple transitions on the same key (a NFA)" do
+ start_state = @machine.start_state
+ state_one = start_state.add_transition :symbol => :foo
+ state_two = start_state.add_transition :symbol => :foo
+
+ @machine.state_table.targets_for(start_state, :foo).should == [state_one, state_two]
+ end
+
+ it "should be able to have a state with a transition to itself" do
+ start_state = @machine.start_state
+ start_state.add_transition :symbol => :foo, :state => start_state
+
+ @machine.state_table.targets_for(start_state, :foo).should == [start_state]
+ end
+
+ it "should add a start state with no transitions to the table" do
+ start_state = @machine.start_state
+
+ @machine.state_table.start_state.should == start_state
+ end
+ end
+
+ describe "deep_copy" do
+ before do
+ @machine = StateMachine.new
+ end
+
+ it "should create a new instance" do
+ @machine.deep_clone.should_not equal(@machine)
+ end
+
+ it "should have a cloned start state" do
+ @machine.deep_clone.start_state.should_not equal(@machine.start_state)
+ end
+
+ it "should have the cloned start state as a final state if the original machine did" do
+ @machine.start_state.final_state = true
+ @machine.deep_clone.start_state.should be_a_final_state
+ end
+
+ it "should call deep_clone on the start state" do
+ @machine.start_state.should_receive(:deep_clone)
+ @machine.deep_clone
+ end
+ end
+ end
+ end
+end
+
diff --git a/spec/hopcoft/machine/state_spec.rb b/spec/hopcoft/machine/state_spec.rb
new file mode 100644
index 0000000..77bf741
--- /dev/null
+++ b/spec/hopcoft/machine/state_spec.rb
@@ -0,0 +1,277 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Machine
+ describe State do
+ it "should set the start state on the first state to a start state" do
+ state = State.new
+ state.should be_a_start_state
+ end
+
+ it "should have no transitions to begin with" do
+ s = State.new
+ s.transitions.should == []
+ end
+
+ it "should be able to add transitions" do
+ s = State.new
+ s.add_transition :symbol => :foo
+ s.transitions.size.should == 1
+ end
+
+ it "should be a start state" do
+ s = State.new
+ s.should be_a_start_state
+ end
+
+ it "should have start state assigned" do
+ s = State.new
+ s.start_state = false
+ s.should_not be_a_start_state
+ end
+
+ it "should not be a final state by default" do
+ s = State.new
+ s.should_not be_a_final_state
+ s.should_not be_final
+ end
+
+ it "should have the final state as assignable" do
+ s = State.new
+ s.final_state = true
+ s.should be_a_final_state
+ s.should be_final
+ end
+
+ describe "transitions" do
+ before do
+ @state = State.new
+ end
+
+ it "should create a transition when calling add_transition" do
+ @state.add_transition :symbol => :foo
+ @state.transitions.first.should be_a_kind_of(Transition)
+ end
+
+ it "should pass on the symbol to the transition" do
+ @state.add_transition :symbol => :baz
+ transition = @state.transitions.first
+ transition.symbol.should == :baz
+ end
+
+ it "should construct a new state when none provided" do
+ @state.add_transition :symbol => :foo
+ transition = @state.transitions.first
+ transition.state.should be_a_kind_of(State)
+ end
+
+ it "should not have the new state as the start state" do
+ @state.add_transition :symbol => :foo
+ transition = @state.transitions.first
+ transition.state.should_not be_a_start_state
+ end
+
+ it "should be able to mark the new state as a final state" do
+ @state.add_transition :symbol => :foo, :final => true
+ transition = @state.transitions.first
+ transition.state.should be_a_final_state
+ end
+
+ it "should take another state as the transition target" do
+ state = mock('state', :null_object => true)
+
+ @state.add_transition :symbol => :foo, :state => state
+ transition = @state.transitions.first
+ transition.state.should == state
+ end
+
+ it "should be able to add transitions recursively" do
+ s1 = State.new
+ s2 = State.new
+
+ s1.add_transition :state => s2, :epsilon => true
+ s2.add_transition :state => s1, :epsilon => true
+
+ table = TransitionTable.new
+
+ s1.add_transitions_to_table(table)
+ end
+
+ describe "passed :machine => m" do
+ before do
+ @state = State.new
+ @machine = StateMachine.new
+ end
+
+ it "should add a transition to another state machines first state" do
+ other_machine_start_state = @machine.start_state
+
+ @state.add_transition :machine => @machine
+
+ @state.transitions.first.state.should == other_machine_start_state
+ end
+
+ it "should add the transition as an epsilon transition" do
+ @state.add_transition :machine => @machine
+
+ @state.transitions.first.should be_a_kind_of(EpsilonTransition)
+ end
+
+ it "should no longer have the other machines start state as a start state in this machine" do
+ other_machine_start_state = @machine.start_state
+
+ @state.add_transition :machine => @machine
+
+ @state.transitions.first.state.should_not be_a_start_state
+ end
+ end
+ end
+
+ describe "name" do
+ it "should take a name param" do
+ state = State.new(:name => "foo")
+ state.name.should == "foo"
+ end
+
+ it "should auto-assign a state #" do
+ State.reset_counter!
+ state = State.new
+ state.name.should == "State 1"
+ end
+
+ it "should assign 'State 2' for the second state created" do
+ State.reset_counter!
+
+ State.new
+ state2 = State.new
+
+ state2.name.should == "State 2"
+ end
+ end
+
+ describe "to_s" do
+ it "should be aliased to the name" do
+ s = State.new
+ s.method(:name).should == s.method(:to_s)
+ end
+ end
+
+ describe "inspect" do
+ it "should display the name" do
+ s = State.new(:name => "State 1")
+ s.inspect.should include("State 1")
+ end
+
+ it "should show start state, final state, etc." do
+ s = State.new(:name => "State 1", :start_state => true, :final => true)
+ s.inspect.should == "State 1 {start: true, final: true, transitions: 0}"
+ end
+
+ it "should display the correct value for the start state" do
+ s = State.new(:name => "State 1", :start_state => false, :final => true)
+ s.inspect.should == "State 1 {start: false, final: true, transitions: 0}"
+ end
+
+ it "should display the correct value for the final state" do
+ s = State.new(:name => "State 1", :start_state => true, :final => false)
+ s.inspect.should == "State 1 {start: true, final: false, transitions: 0}"
+ end
+
+ it "should display 1 transition" do
+ s = State.new(:name => "State 1", :start_state => true, :final => true)
+ s.add_transition
+
+ s.inspect.should == "State 1 {start: true, final: true, transitions: 1}"
+ end
+ end
+
+ describe "deep_clone" do
+ before do
+ @state = State.new
+ end
+
+ it "should be of class State" do
+ clone = @state.deep_clone
+ clone.should be_a_kind_of(State)
+ end
+
+ it "should be a new instance" do
+ clone = @state.deep_clone
+ clone.should_not equal(@state)
+ end
+
+ it "should be a final state if the original was a final state" do
+ @state.final_state = true
+ clone = @state.deep_clone
+ clone.should be_a_final_state
+ end
+
+ it "should not have the same transition objects" do
+ @state.add_transition
+ transition = @state.transitions.first
+
+ clone = @state.deep_clone
+ clone.transitions.first.should_not equal(transition)
+ end
+
+ it "should have one transition if the original had one transition" do
+ @state.add_transition
+
+ clone = @state.deep_clone
+ clone.transitions.size.should == 1
+ end
+
+ it "should have two transitions if the original had two transition" do
+ @state.add_transition
+ @state.add_transition
+
+ clone = @state.deep_clone
+ clone.transitions.size.should == 2
+ end
+
+ it "should have a transition as a Transition object" do
+ @state.add_transition
+
+ clone = @state.deep_clone
+ clone.transitions.first.should be_a_kind_of(Transition)
+ end
+
+ it "should call deep_clone on the transitions" do
+ @state.add_transition
+
+ @state.transitions.first.should_receive(:deep_clone)
+ @state.deep_clone
+ end
+ end
+
+ describe "substates" do
+ before do
+ @state = State.new
+ end
+
+ it "should have none with no transitions" do
+ @state.substates.should == []
+ end
+
+ it "should have a state which is linked to by a transition" do
+ new_state = @state.add_transition :symbol => :foo
+ @state.substates.should == [new_state]
+ end
+
+ it "should have multiple states" do
+ one = @state.add_transition :symbol => :foo
+ two = @state.add_transition :symbol => :foo
+
+ @state.substates.should == [one, two]
+ end
+
+ it "should show states of the states (should find the states substates recursively)" do
+ substate = @state.add_transition :symbol => :foo
+ sub_substate = substate.add_transition :symbol => :foo
+
+ @state.substates.should == [substate, sub_substate]
+ end
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/machine/table_converter_spec.rb b/spec/hopcoft/machine/table_converter_spec.rb
new file mode 100644
index 0000000..1a1c5fc
--- /dev/null
+++ b/spec/hopcoft/machine/table_converter_spec.rb
@@ -0,0 +1,133 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Machine
+ describe TableConverter do
+ before do
+ @hash = Dictionary.new
+ @converter = TableConverter.new(@hash)
+ end
+
+ describe "transition symbols" do
+ it "should have no transitions when an empty hash" do
+ @converter.transition_symbols.should == []
+ end
+
+ it "should have the symbols" do
+ @hash[:state1] = { :transition2 => [:state2] }
+
+ @converter.transition_symbols.should == [:transition2]
+ end
+
+ it "should have many transition symbols under various states" do
+ @hash[:state1] = { :transition1 => [] }
+ @hash[:state2] = { :transition2 => [] }
+
+ @converter.transition_symbols.should include(:transition1)
+ @converter.transition_symbols.should include(:transition2)
+ end
+
+ it "should use all of the transition symbols per state" do
+ @hash[:state1] = { :transition_symbol_1 => [], :transition_symbol_2 => [] }
+
+ @converter.transition_symbols.should include(:transition_symbol_1)
+ @converter.transition_symbols.should include(:transition_symbol_2)
+ end
+
+ it "should only include a transition symbol once, even if listed under multiple states" do
+ @hash[:state1] = { :transition_symbol_1 => [] }
+ @hash[:state2] = { :transition_symbol_1 => [] }
+
+ @converter.transition_symbols.should == [:transition_symbol_1]
+ end
+
+ it "should cache the transition symbols" do
+ @hash[:state] = { :one => [] }
+
+ lambda {
+ @hash.delete(:state)
+ }.should_not change { @converter.transition_symbols.dup }
+ end
+ end
+
+ describe "primary states" do
+ it "should be empty for an empty hash" do
+ @converter.primary_states.should == []
+ end
+
+ it "should have a state used as an index in the hash" do
+ @hash[:one] = {}
+ @converter.primary_states.should == [:one]
+ end
+
+ it "should cache the primary states" do
+ @hash[:one] = {:two => [:three]}
+
+ lambda {
+ @hash.delete(:one)
+ }.should_not change { @converter.primary_states.dup }
+ end
+ end
+
+ describe "header" do
+ it "should have an empty string with an empty hash" do
+ @converter.header.should == [""]
+ end
+
+      it "should use the transition symbols, preceded by an empty string" do
+ @hash[:one] = {:two => []}
+ @converter.header.should == ["", :two]
+ end
+
+ it "should use multiple transition symbols" do
+ @hash[:one] = {:trans1 => []}
+ @hash[:two] = {:trans2 => []}
+
+ @converter.header.should == ["", :trans1, :trans2]
+ end
+ end
+
+ describe "body" do
+ it "should be empty for an empty hash" do
+ @converter.body.should == []
+ end
+
+ it "should have a state followed by it's result" do
+ @hash[:one] = { :transition1 => [:two] }
+
+ @converter.body.should == [[:one, [:two]]]
+ end
+
+ it "should have a symbol with no values if none are present (a degenerative case)" do
+ @hash[:one] = { :transition => [] }
+ @converter.body.should == [[:one, []]]
+ end
+
+ it "should use empty arrays for symbols which do not exist (the empty set)" do
+ @hash[:one] = { :t1 => [:two] }
+ @hash[:two] = { :t2 => [:three] }
+
+ @converter.body.should == [[:one, [:two], []], [:two, [], [:three]]]
+ end
+
+ it "should use multiple target states (for a NFA)" do
+ @hash[:one] = { :t1 => [:two, :three]}
+
+ @converter.body.should == [[:one, [:two, :three]]]
+ end
+ end
+
+ describe "to_a" do
+ it "should be empty with an empty hash" do
+ @converter.to_a.should == []
+ end
+
+ it "should use the header and body" do
+ @hash[:one] = { :two => [:three] }
+
+ @converter.to_a.should == [["", :two], [[:one, [:three]]]]
+ end
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/machine/table_displayer_spec.rb b/spec/hopcoft/machine/table_displayer_spec.rb
new file mode 100644
index 0000000..5726176
--- /dev/null
+++ b/spec/hopcoft/machine/table_displayer_spec.rb
@@ -0,0 +1,117 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Machine
+ describe TableDisplayer do
+ before do
+ @hash = Dictionary.new
+ @displayer = TableDisplayer.new(@hash)
+ end
+
+ describe "converted table" do
+ it "should convert symbols to strings in the header" do
+ state = State.new(:start_state => false)
+
+ @hash[state] = { :transition => [state] }
+
+ @displayer.header.should == ["", "transition"]
+ end
+
+ it "should convert states to state names in the body" do
+ state = State.new(:start_state => false)
+
+ @hash[state] = { :transition => [state] }
+ @displayer.body.should == [[state.name, state.name]]
+ end
+
+ it "should join multiple states with a comma (for an nfa)" do
+ state1 = State.new(:name => "State 1", :start_state => false)
+ state2 = State.new(:name => "State 2", :start_state => false)
+
+ @hash[state1] = { :transition => [state1, state2] }
+
+ @displayer.body.should == [["State 1", "State 1, State 2"]]
+ end
+
+      it "should display an empty string as an empty string (when there is no state transition)" do
+ state = State.new(:name => "State 1", :start_state => false)
+ @hash[state] = { :transition => [] }
+
+ @displayer.body.should == [["State 1", ""]]
+ end
+
+ it "should have the header + footer combined in to_a" do
+ state = State.new(:name => "A", :start_state => false)
+
+ @hash[state] = { :transition => [state] }
+
+ @displayer.to_a.should == [["", "transition"], [["A", "A"]]]
+ end
+
+ it "should output a table" do
+ state = State.new(:name => "A", :start_state => false)
+
+ @hash[state] = { :transition => [state] }
+
+ @displayer.to_s.should == <<-HERE
+
++---+------------+
+| | transition |
++---+------------+
+| A | A |
++---+------------+
+HERE
+ end
+
+ it "should display a -> in front of a start state in the first row" do
+ state = State.new(:name => "A", :start_state => true)
+
+ @hash[state] = { :transition => [state] }
+
+ @displayer.to_a.should == [["", "transition"], [["-> A", "A"]]]
+ end
+
+ it "should use the correct name of the state" do
+ state = State.new(:name => "B", :start_state => true)
+
+ @hash[state] = { :transition => [state] }
+
+ @displayer.to_a.should == [["", "transition"], [["-> B", "B"]]]
+ end
+
+ it "should display a * next to a final state in the first row" do
+ state = State.new(:name => "A", :final => true, :start_state => false)
+
+ @hash[state] = { :transition => [state] }
+
+ @displayer.to_a.should == [["", "transition"], [["* A", "* A"]]]
+ end
+
+ it "should use the correct state name with the star" do
+ state = State.new(:name => "B", :final => true, :start_state => false)
+
+ @hash[state] = { :transition => [state] }
+
+ @displayer.to_a.should == [["", "transition"], [["* B", "* B"]]]
+ end
+
+ it "should display a * -> <state-name> if the state is both final and a start state" do
+ state = State.new(:name => "A", :final => true, :start_state => true)
+
+ @hash[state] = { :transition => [state] }
+
+ @displayer.to_a.should == [["", "transition"], [["* -> A", "* A"]]]
+ end
+
+ it "should display a star next to a final state in the middle of the table" do
+ start_state = State.new(:name => "A", :final => false, :start_state => true)
+ final_state = State.new(:name => "B", :final => true, :start_state => false)
+
+ @hash[start_state] = { :transition => [final_state] }
+
+ @displayer.body.should == [["-> A", "* B"]]
+ end
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/machine/transition_spec.rb b/spec/hopcoft/machine/transition_spec.rb
new file mode 100644
index 0000000..a537606
--- /dev/null
+++ b/spec/hopcoft/machine/transition_spec.rb
@@ -0,0 +1,64 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Machine
+ describe Transition do
+ before do
+ @state = mock(State)
+ end
+
+ it "should have a symbol" do
+ t = Transition.new(:foo, @state)
+ t.symbol.should == :foo
+ end
+
+ it "should convert a string symbol given to a symbol" do
+ t = Transition.new("foo", @state)
+ t.symbol.should == :foo
+ end
+
+ it "should have the transition state" do
+ t = Transition.new(:foo, @state)
+ t.state.should == @state
+ end
+
+ describe "deep_clone" do
+ before do
+ @to = State.new
+ end
+
+ it "should return a transition" do
+ t = Transition.new(:sym, @to)
+ t.deep_clone.should be_a_kind_of(Transition)
+ end
+
+ it "should return a new instance" do
+ t = Transition.new(:sym, @to)
+ t.deep_clone.should_not equal(t)
+ end
+
+ it "should use the symbol" do
+ t = Transition.new(:sym, @to)
+ t.deep_clone.symbol.should == :sym
+ end
+
+ it "should use the correct symbol" do
+ t = Transition.new(:foo, @to)
+ t.deep_clone.symbol.should == :foo
+ end
+
+ it "should have the state" do
+ t = Transition.new(:foo, @to)
+ t.deep_clone.state.should be_a_kind_of(State)
+ end
+
+ it "should call deep_clone on the to state" do
+ t = Transition.new(:foo, @to)
+
+ @to.should_receive(:deep_clone)
+ t.deep_clone
+ end
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/machine/transition_table/targets_for_spec.rb b/spec/hopcoft/machine/transition_table/targets_for_spec.rb
new file mode 100644
index 0000000..8b4364e
--- /dev/null
+++ b/spec/hopcoft/machine/transition_table/targets_for_spec.rb
@@ -0,0 +1,115 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../../spec_helper")
+
+module Hopcroft
+ module Machine
+ describe TransitionTable do
+ describe "new_transitions_for" do
+ before do
+ @table = TransitionTable.new
+ @state1 = State.new
+ @state2 = State.new(:start_state => false)
+ @state3 = State.new(:start_state => false)
+ @state4 = State.new(:start_state => false)
+ @state5 = State.new(:start_state => false)
+ end
+
+ it "should return a transition under a symbol" do
+ @table.add_state_change @state1, @state2, :a
+
+ @table.targets_for(@state1, :a).should == [@state2]
+ end
+
+ it "should use the correct sym" do
+ @table.add_state_change @state1, @state2, :b
+
+ @table.targets_for(@state1, :b).should == [@state2]
+ end
+
+ it "should only find states matching the sym" do
+ @table.add_state_change @state1, @state2, :a
+ @table.add_state_change @state1, @state3, :b
+
+ @table.targets_for(@state1, :a).should == [@state2]
+ end
+
+ it "should return multiple transitions under the same sym" do
+ @table.add_state_change @state1, @state2, :a
+ @table.add_state_change @state1, @state3, :a
+
+ @table.targets_for(@state1, :a).should == [@state2, @state3]
+ end
+
+ it "should return an empty array if it cannot find the sym" do
+ @table.add_state_change @state1, @state2, :a
+
+ @table.targets_for(@state1, :b).should == []
+ end
+
+ it "should return an empty array if it cannot find the state" do
+ @table.add_state_change @state1, @state2, :a
+
+ @table.targets_for(mock('a state'), :a).should == []
+ end
+
+ it "should find an epsilon transition *after* a match" do
+ @table.add_state_change @state1, @state2, :a
+ @table.add_state_change @state2, @state3, EpsilonTransition
+
+ @table.targets_for(@state1, :a).should == [@state2, @state3]
+ end
+
+ it "should find multiple epsilon transitions" do
+ @table.add_state_change @state1, @state2, :a
+ @table.add_state_change @state2, @state3, EpsilonTransition
+ @table.add_state_change @state2, @state4, EpsilonTransition
+
+ @table.targets_for(@state1, :a).should == [@state2, @state3, @state4]
+ end
+
+ it "should follow epsilon transitions following other epsilon transitions *after* a match" do
+ @table.add_state_change @state1, @state2, :a
+ @table.add_state_change @state2, @state3, EpsilonTransition
+ @table.add_state_change @state3, @state4, EpsilonTransition
+
+ @table.targets_for(@state1, :a).should == [@state2, @state3, @state4]
+ end
+
+ it "should not follow a sym after matching the sym" do
+ @table.add_state_change @state1, @state2, :a
+ @table.add_state_change @state2, @state3, :a
+
+ @table.targets_for(@state1, :a).should == [@state2]
+ end
+
+ it "should not follow a sym after matching a sym when epsilon transitions connect the syms" do
+ @table.add_state_change @state1, @state2, :a
+ @table.add_state_change @state2, @state3, EpsilonTransition
+ @table.add_state_change @state3, @state4, :a
+
+ @table.targets_for(@state1, :a).should == [@state2, @state3]
+ end
+
+ it "should not find other (non-epsilon) transitions after a match" do
+ @table.add_state_change @state1, @state2, :a
+ @table.add_state_change @state2, @state3, :a
+ @table.add_state_change @state2, @state3, EpsilonTransition
+ @table.add_state_change @state3, @state4, :a
+
+ @table.targets_for(@state1, :a).should == [@state2, @state3]
+ end
+
+ it "should match a char under an AnyCharTransition" do
+ @table.add_state_change @state1, @state2, AnyCharTransition
+
+ @table.targets_for(@state1, :a).should == [@state2]
+ end
+
+ it "should match any char" do
+ @table.add_state_change @state1, @state2, AnyCharTransition
+
+ @table.targets_for(@state1, :b).should == [@state2]
+ end
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/machine/transition_table_spec.rb b/spec/hopcoft/machine/transition_table_spec.rb
new file mode 100644
index 0000000..275c453
--- /dev/null
+++ b/spec/hopcoft/machine/transition_table_spec.rb
@@ -0,0 +1,249 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Machine
+ describe TransitionTable do
+ describe "adding a state change" do
+ before do
+ @table = TransitionTable.new
+ end
+
+ it "should create a two dimensional entry, with [from_state][transition] = [to_state]" do
+ from = mock(State, :start_state? => false)
+ to = mock(State, :start_state? => false)
+
+ @table.add_state_change(from, to, :a)
+
+ @table.targets_for(from, :a).should == [to]
+ end
+
+ it "should be able to use strings when finding a start state" do
+ from = mock State, :start_state? => true, :final? => false
+ to = mock State, :start_state? => false, :final? => true
+
+ @table.add_state_change(from, to, :a)
+ @table.start_state = from
+
+ @table.matches?("a").should be_true
+ end
+
+ it "should be able to use multiple transitions from the same state" do
+ from = mock(State, :start_state? => false)
+ first_result = mock(State, :start_state? => false)
+ second_result = mock(State, :start_state? => false)
+
+ @table.start_state = from
+ @table.add_state_change(from, first_result, :a)
+ @table.add_state_change(from, second_result, :b)
+
+ @table.targets_for(from, :a).should == [first_result]
+ @table.targets_for(from, :b).should == [second_result]
+ end
+
+ it "should be able to use the same transition symbol to different states (for an NFA)" do
+ from = mock(State, :start_state? => false)
+ first_result = mock(State, :start_state? => false)
+ second_result = mock(State, :start_state? => false)
+
+ @table.add_state_change(from, first_result, :a)
+ @table.add_state_change(from, second_result, :a)
+
+ @table.targets_for(from, :a).should == [first_result, second_result]
+ end
+
+ it "should have a transition for an 'any' transition" do
+ from = State.new :start_state => true
+ to = from.add_transition :any => true
+
+ transition = from.transitions.first.symbol
+
+ @table.add_state_change from, to, transition
+
+ @table.targets_for(from, :a).should == [to]
+ end
+ end
+
+ describe "targets_for" do
+ before do
+ @table = TransitionTable.new
+ @state = mock(State, :start_state? => false, :final? => false)
+ @transition = :foo
+ end
+
+ it "should reutrn an empty array if it indexes the state, but no transitions for that state" do
+ @table.add_state_change(@state, @state, :foo)
+
+ @table.targets_for(@state, :bar).should == []
+ end
+
+ it "should return an empty array if it does not index the state" do
+ @table.targets_for(@state, :foo).should == []
+ end
+ end
+
+ describe "matching a symbol" do
+ before do
+ @table = TransitionTable.new
+ end
+
+ it "should match if one symbol in the table, and the symbol is given" do
+ start_state = mock(State, :final? => false, :start_state? => true)
+ final_state = mock(State, :final? => true, :start_state? => false)
+
+ @table.start_state = start_state
+ @table.add_state_change(start_state, final_state, :foo)
+
+ @table.matches?([:foo]).should be_true
+ end
+
+ it "should not match when it cannot index the transition" do
+ start_state = mock(State, :final? => false, :start_state? => true)
+ final_state = mock(State, :final? => true, :start_state? => false)
+
+ @table.start_state = start_state
+ @table.add_state_change(start_state, final_state, :foo)
+
+ @table.matches?([:bar]).should be_false
+ end
+
+ it "should not match if the last state in the input is not a final state" do
+ start_state = mock(State, :final? => false, :start_state? => true)
+ final_state = mock(State, :final? => false, :start_state? => false)
+
+ @table.start_state = start_state
+ @table.add_state_change(start_state, final_state, :foo)
+
+ @table.matches?([:foo]).should be_false
+ end
+
+ it "should raise an error if there is no start state" do
+ lambda {
+ @table.matches?([:foo])
+ }.should raise_error(TransitionTable::MissingStartState)
+ end
+
+ it "should match when following two symbols" do
+ start_state = mock(State, :final? => false, :start_state? => true)
+ state_one = mock(State, :final? => false, :start_state? => false)
+ state_two = mock(State, :final? => true, :start_state? => false)
+
+ @table.start_state = start_state
+ @table.add_state_change start_state, state_one, :one
+ @table.add_state_change state_one, state_two, :two
+
+ @table.matches?([:one, :two]).should be_true
+ end
+
+ it "should not match when following two symbols, and the last is not a final state" do
+ start_state = mock(State, :final? => false, :start_state? => true)
+ state_one = mock(State, :final? => false, :start_state? => false)
+ state_two = mock(State, :final? => false, :start_state? => false)
+
+ @table.start_state = start_state
+ @table.add_state_change start_state, state_one, :one
+ @table.add_state_change state_one, state_two, :two
+
+ @table.matches?([:one, :two]).should be_false
+ end
+
+ it "should match a NFA, where a start state leads to one of two possible final states" do
+ start_state = mock(State, :final? => false, :start_state? => true)
+ state_one = mock(State, :final? => false, :start_state? => false)
+ state_two = mock(State, :final? => true, :start_state? => false)
+
+ @table.start_state = start_state
+ @table.add_state_change start_state, state_one, :one
+ @table.add_state_change start_state, state_two, :one
+
+ @table.matches?([:one]).should be_true
+ end
+
+ it "should not match when the one state does not transition to the other" do
+ start_state = mock(State, :final? => false, :start_state? => true)
+ state_one = mock(State, :final? => false, :start_state? => false)
+ state_two = mock(State, :final? => true, :start_state? => false)
+
+ @table.start_state = start_state
+ @table.add_state_change start_state, state_one, :one
+ @table.add_state_change start_state, state_two, :two
+
+ @table.matches?([:one, :two]).should be_false
+ end
+
+ it "should not consume any chars under an epsilon transition" do
+ start_state = mock(State, :final? => false, :start_state? => true)
+ state_two = mock(State, :final? => true, :start_state? => false)
+
+ @table.start_state = start_state
+ @table.add_state_change start_state, state_two, EpsilonTransition
+
+ @table.matches?([]).should be_true
+ end
+ end
+
+ describe "inspect" do
+ before do
+ @table = TransitionTable.new
+ @displayer = mock TableDisplayer
+ end
+
+ it "should output a state table" do
+ TableDisplayer.should_receive(:new).with(@table).and_return @displayer
+ @displayer.should_receive(:to_s)
+
+ @table.inspect
+ end
+
+ it "should display 'Empty table' when empty" do
+ @table.inspect.should == "\nEmpty table"
+ end
+
+ it "should be able to display a start state with no transitions" do
+ start_state = State.new(:start_state => true, :name => "Foo")
+
+ @table.start_state = start_state
+ @table.inspect.should include("Foo")
+ end
+ end
+
+ describe "to_hash" do
+ it "should return a hash" do
+ TransitionTable.new.to_hash.class.should == Hash
+ end
+ end
+
+ describe "initial states" do
+ describe "for a start_state to an epsilon transition" do
+ # +--------------+--------------------------------------+-------------+
+ # | | Hopcroft::Machine::EpsilonTransition | a |
+ # +--------------+--------------------------------------+-------------+
+ # | -> State 207 | State 208 | |
+ # | State 208 | | * State 209 |
+ # +--------------+--------------------------------------+-------------+
+ before do
+ @state1 = State.new :start_state => true, :name => "State 1"
+ @state2 = State.new :start_state => false, :name => "State 2"
+ @state3 = State.new :start_state => false, :name => "State 3", :final_state => true
+
+ @table = TransitionTable.new
+ @table.add_state_change @state1, @state2, EpsilonTransition
+ @table.add_state_change @state2, @state3, :a
+ @table.start_state = @state1
+ end
+
+ it "should have state 1 as an initial state (it is a start state)" do
+ @table.initial_states.should include(@state1)
+ end
+
+ it "should have state 2 as an initial state (it has an epsilon transition from the start state)" do
+ @table.initial_states.should include(@state2)
+ end
+
+ it "should not have state 3 as an initial state" do
+ @table.initial_states.should_not include(@state3)
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/regex/alternation_spec.rb b/spec/hopcoft/regex/alternation_spec.rb
new file mode 100644
index 0000000..88ac750
--- /dev/null
+++ b/spec/hopcoft/regex/alternation_spec.rb
@@ -0,0 +1,79 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Regex
+ describe Alternation do
+ describe "to_regex_s" do
+ it "should be a|b" do
+ one = Char.new("a")
+ two = Char.new("b")
+
+ alternation = Alternation.new(one, two)
+ alternation.to_regex_s.should == "a|b"
+ end
+
+ it "should use the correct subexpressions" do
+ one = Char.new("x")
+ two = Char.new("y")
+
+ alternation = Alternation.new(one, two)
+ alternation.to_regex_s.should == "x|y"
+ end
+
+ it "should use more than two subexpressions" do
+ one = Char.new "a"
+ two = Char.new "b"
+ three = Char.new "c"
+
+ alternation = Alternation.new(one, two, three)
+
+ alternation.to_regex_s.should == "a|b|c"
+ end
+ end
+
+ describe "matching a string" do
+ it "should match a with 'a|b'" do
+ alternation = Alternation.new(Char.new("a"), Char.new("b"))
+
+ alternation.matches?("a").should be_true
+ end
+
+ it "should not match a char not present" do
+ alternation = Alternation.new(Char.new("a"), Char.new("b"))
+ alternation.matches?("x").should be_false
+ end
+
+ it "should match 'b' with 'a|b'" do
+ alternation = Alternation.new(Char.new("a"), Char.new("b"))
+
+ alternation.matches?("b").should be_true
+ end
+
+ it "should not match 'ab' with 'a|b'" do
+ alternation = Alternation.new(Char.new("a"), Char.new("b"))
+ alternation.matches?("ab").should be_false
+ end
+ end
+
+ describe "displaying the state table" do
+ it "should not raise an error" do
+ lambda {
+ alternation = Alternation.new(Char.new("a"), Char.new("b"))
+ alternation.inspect
+ }.should_not raise_error
+ end
+
+ it "should keep the same number of states after being called several times" do
+ alternation = Alternation.new(Char.new("a"), Char.new("b"))
+ table = alternation.to_machine.state_table
+
+ lambda {
+ 3.times do
+ table.initial_states
+ end
+ }.should_not change { table.initial_states.size }
+ end
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/regex/base_spec.rb b/spec/hopcoft/regex/base_spec.rb
new file mode 100644
index 0000000..e793265
--- /dev/null
+++ b/spec/hopcoft/regex/base_spec.rb
@@ -0,0 +1,69 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Regex
+ describe Base do
+ describe "==" do
+ it "should be false if the other does not respond_to :to_regex_s" do
+ Regex::KleenStar.new("a").should_not == Object.new
+ end
+
+ it "should be false if the other object generates a different regex" do
+ Regex::KleenStar.new(Char.new("a")).should_not == Regex::KleenStar.new(Char.new("b"))
+ end
+
+ it "should be true if the other generates the same regex" do
+ Regex::KleenStar.new(Char.new("a")).should == Regex::KleenStar.new(Char.new("a"))
+ end
+ end
+
+ describe "+" do
+ it "should produce a concatenation of two regexs" do
+ one = Regex::Char.new("a")
+ two = Regex::Char.new("b")
+ concat = one + two
+
+ concat.to_regex_s.should == "ab"
+ end
+
+ it "should use the correct objects" do
+ one = Regex::Char.new("x")
+ two = Regex::Char.new("y")
+
+ (one + two).to_regex_s.should == "xy"
+ end
+ end
+
+ describe "|" do
+ it "should create an alternation" do
+ one = Regex::Char.new("a")
+ two = Regex::Char.new("b")
+
+ (one | two).to_regex_s.should == "a|b"
+ end
+
+ it "should use the correct objects" do
+ one = Regex::Char.new("x")
+ two = Regex::Char.new("y")
+
+ (one | two).to_regex_s.should == "x|y"
+ end
+ end
+
+ describe "to_regexp" do
+ it "should turn the object into a regexp" do
+ Char.new("x").to_regexp.should == /x/
+ end
+
+ it "should use the self" do
+ Char.new("y").to_regexp.should == /y/
+ end
+
+ it "should have #to_regex as an alias" do
+ c = Char.new("a")
+ c.method(:to_regex).should == c.method(:to_regexp)
+ end
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/regex/char_spec.rb b/spec/hopcoft/regex/char_spec.rb
new file mode 100644
index 0000000..9be86ef
--- /dev/null
+++ b/spec/hopcoft/regex/char_spec.rb
@@ -0,0 +1,88 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Regex
+ describe Char do
+ it "should match one char" do
+ c = Char.new("a")
+ c.matches?("a").should be_true
+ end
+
+ it "should not match a different char" do
+ c = Char.new("a")
+ c.matches?("b").should be_false
+ end
+
+ it "should not match multiple chars" do
+ c = Char.new("a")
+ c.matches?("ab").should be_false
+ end
+
+ it "should not match an empty string" do
+ c = Char.new("a")
+ c.matches?("").should be_false
+ end
+
+ it "should raise an error if constructed with the empty string" do
+ lambda {
+ Char.new("")
+ }.should raise_error
+ end
+
+ it "should have the char as the regex" do
+ Char.new("a").to_regex_s.should == "a"
+ Char.new("b").to_regex_s.should == "b"
+ end
+
+ it "should escape a ." do
+ Char.new(".").to_regex_s.should == '\.'
+ end
+
+ it "should escape a +" do
+ Char.new("+").to_regex_s.should == '\+'
+ end
+
+ it "should escape a ?" do
+ Char.new("?").to_regex_s.should == '\?'
+ end
+
+ it "should escape a *" do
+ Char.new("*").to_regex_s.should == '\*'
+ end
+
+ it "should escape a [" do
+ Char.new("[").to_regex_s.should == '\['
+ end
+
+ it "should escape a ]" do
+ Char.new("]").to_regex_s.should == '\]'
+ end
+ end
+
+ describe "to_machine" do
+ it "should return a new machine" do
+ char = Char.new("a")
+ char.to_machine.should be_a_kind_of(Machine::StateMachine)
+ end
+
+ it "should construct the one char machine" do
+ char = Char.new("a")
+ start_state = char.to_machine.start_state
+
+ start_state.transitions.size.should == 1
+ first_transition = start_state.transitions.first
+ first_transition.symbol.should == :a
+ first_transition.state.should be_a_final_state
+ end
+
+ it "should use the correct one char" do
+ char = Char.new("b")
+ start_state = char.to_machine.start_state
+
+ start_state.transitions.size.should == 1
+ first_transition = start_state.transitions.first
+ first_transition.symbol.should == :b
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/regex/charachter_class_spec.rb b/spec/hopcoft/regex/charachter_class_spec.rb
new file mode 100644
index 0000000..aa89873
--- /dev/null
+++ b/spec/hopcoft/regex/charachter_class_spec.rb
@@ -0,0 +1,77 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Regex
+ describe CharacterClass do
+ describe "checking for valid expressions" do
+ it "should not be valid with e-a" do
+ lambda {
+ CharacterClass.new("e-a")
+ }.should raise_error(CharacterClass::InvalidCharacterClass)
+ end
+
+ it "should return a Char if one char long" do
+ char = CharacterClass.new("a")
+ char.should be_a_kind_of(Regex::Char)
+
+ char.to_regex_s.should == "a"
+ end
+
+ it "should return a char if two chars long, and the first char is an escape" do
+ char = CharacterClass.new("\\a")
+ char.should be_a_kind_of(Regex::Char)
+ end
+
+ it "should be valid with a-e" do
+ klass = CharacterClass.new("a-e")
+ klass.expression.should == "a-e"
+ end
+
+ it "should be invalid if the second char comes before the first in the alphabet" do
+ lambda {
+ CharacterClass.new("b-a")
+ }.should raise_error
+ end
+
+ it "should allow multiple sets of ranges" do
+ lambda {
+ CharacterClass.new("a-zA-Z")
+ }.should_not raise_error
+ end
+
+ it "should have the regex string" do
+ CharacterClass.new("a-c").to_regex_s.should == "[a-c]"
+ end
+ end
+
+ describe "matching" do
+ it "should match an a in [a-z]" do
+ klass = CharacterClass.new("a-z")
+ klass.matches?("a").should be_true
+ end
+
+ it "should match b in [a-z]" do
+ klass = CharacterClass.new("a-z")
+ klass.matches?("b").should be_true
+ end
+
+ it "should match an X in [A-Z]" do
+ klass = CharacterClass.new("A-Z")
+ klass.matches?("X").should be_true
+ end
+
+ it "should not match an a in [A-Z]" do
+ klass = CharacterClass.new("A-Z")
+ klass.matches?("a").should be_false
+ end
+
+ it "should match a number in [0-9]" do
+ klass = CharacterClass.new("0-9")
+ klass.matches?("0").should be_true
+ end
+
+ it "should match in a multi-range expression [0-9a-eA-E]"
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/regex/concatenation_spec.rb b/spec/hopcoft/regex/concatenation_spec.rb
new file mode 100644
index 0000000..5e6f65c
--- /dev/null
+++ b/spec/hopcoft/regex/concatenation_spec.rb
@@ -0,0 +1,46 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Regex
+ describe Concatenation do
+ it "should initialize with a series of args" do
+ one, two = mock, mock
+
+ concat = Concatenation.new one, two
+ concat.to_a.should == [one, two]
+ end
+
+ describe "to_regex_s" do
+ it "should return the regex of the two objs" do
+ one = mock :to_regex_s => "foo"
+ two = mock :to_regex_s => "bar"
+
+ Concatenation.new(one, two).to_regex_s.should == "foobar"
+ end
+
+ it "should use the correct regexs" do
+ one = mock :to_regex_s => "a"
+ two = mock :to_regex_s => "b"
+
+ Concatenation.new(one, two).to_regex_s.should == "ab"
+ end
+ end
+
+ describe "matches?" do
+ it "should match a single char" do
+ concat = Concatenation.new(Char.new("a"))
+ concat.matches?("a").should be_true
+ end
+
+ it "should match a regex of the two regexs put together" do
+ one = Char.new("a")
+ two = Char.new("b")
+
+ concat = Concatenation.new(one, two)
+
+ concat.matches?("ab").should be_true
+ end
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/regex/dot_spec.rb b/spec/hopcoft/regex/dot_spec.rb
new file mode 100644
index 0000000..d37a50d
--- /dev/null
+++ b/spec/hopcoft/regex/dot_spec.rb
@@ -0,0 +1,21 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Regex
+ describe Dot do
+ it "should accept any one char" do
+ d = Dot.new
+ d.matches?("a").should be_true
+ end
+
+ it "should not accept 0 chars" do
+ d = Dot.new
+ d.matches?("").should be_false
+ end
+
+ it "should have to_regex_s as a dot" do
+ Dot.new.to_regex_s.should == "."
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/regex/kleen_star_spec.rb b/spec/hopcoft/regex/kleen_star_spec.rb
new file mode 100644
index 0000000..2c28b2b
--- /dev/null
+++ b/spec/hopcoft/regex/kleen_star_spec.rb
@@ -0,0 +1,81 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Regex
+ describe KleenStar do
+ it "should take a regex" do
+ s = KleenStar.new(Char.new("f"))
+ s.expression.should == Char.new("f")
+ end
+
+ describe "matching" do
+ def new_kleen_star_with_string(str)
+ KleenStar.new(Char.new(str))
+ end
+
+ it "should match 0 chars" do
+ s = new_kleen_star_with_string("a")
+ s.matches?("").should be_true
+ end
+
+ it "should match one char" do
+ s = new_kleen_star_with_string("a")
+ s.matches?("a").should be_true
+ end
+
+ it "should NOT match a different char" do
+ s = new_kleen_star_with_string("a")
+ s.matches?("b").should be_false
+ end
+
+ it "should match many of the same chars" do
+ s = new_kleen_star_with_string("a")
+ s.matches?("aa").should be_true
+ end
+
+ it "should match 10 chars" do
+ s = new_kleen_star_with_string("a")
+ s.matches?("aaaaaaaaaa").should be_true
+ end
+
+ it "should match 'aaaa' with '(a|b)*'" do
+ pending 'TODO'
+ expr = Alternation.new(Char.new("a"), Char.new("b"))
+
+ s = KleenStar.new(expr)
+ s.matches?("aaaa").should be_true
+ end
+
+ it "should match 'bbbb' with '(a|b)*'" do
+ pending 'TODO'
+ expr = Alternation.new(Char.new("a"), Char.new("b"))
+
+ s = KleenStar.new(expr)
+ s.matches?("bbbb").should be_true
+ end
+ end
+
+ it "should have the regex string" do
+ KleenStar.new(Char.new("a")).to_regex_s.should == "a*"
+ end
+
+ it "should be able to output the state table" do
+ star = KleenStar.new(Char.new("a"))
+
+ lambda {
+ star.to_machine.state_table.inspect
+ }.should_not raise_error
+ end
+
+ describe "==" do
+ it "should be true with subexpressions" do
+ one = KleenStar.new(CharacterClass.new("a-z"))
+ two = KleenStar.new(CharacterClass.new("a-z"))
+
+ one.should == two
+ two.should == one
+ end
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/regex/optional_symbol_spec.rb b/spec/hopcoft/regex/optional_symbol_spec.rb
new file mode 100644
index 0000000..986cfab
--- /dev/null
+++ b/spec/hopcoft/regex/optional_symbol_spec.rb
@@ -0,0 +1,51 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Regex
+ describe OptionalSymbol do
+ def new_optional_symbol(str)
+ OptionalSymbol.new(Char.new(str))
+ end
+
+ it "should have the expression" do
+ optional = new_optional_symbol("a")
+ optional.expression.should == Char.new("a")
+ end
+
+ it "should have the regex" do
+ optional = new_optional_symbol("a")
+ optional.to_regex_s.should == "a?"
+ end
+
+ it "should use the correct expression in to_regex_s" do
+ optional = new_optional_symbol("b")
+ optional.to_regex_s.should == "b?"
+ end
+
+ it "should match the char if present" do
+ optional = new_optional_symbol("a")
+ optional.matches?("a").should be_true
+ end
+
+ it "should match an empty string" do
+ optional = new_optional_symbol("a")
+ optional.matches?("").should be_true
+ end
+
+ it "should not match a one char input when the char does not match" do
+ optional = new_optional_symbol("a")
+ optional.matches?("b").should be_false
+ end
+
+ it "should not match a two char input" do
+ optional = new_optional_symbol("a")
+ optional.matches?("ab").should be_false
+ end
+
+ it "should match the correct char" do
+ optional = new_optional_symbol("b")
+ optional.matches?("b").should be_true
+ end
+ end
+ end
+end
diff --git a/spec/hopcoft/regex/parser_spec.rb b/spec/hopcoft/regex/parser_spec.rb
new file mode 100644
index 0000000..856b20e
--- /dev/null
+++ b/spec/hopcoft/regex/parser_spec.rb
@@ -0,0 +1,235 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Regex
+ describe Parser do
+ it "should parse 'a' as a Char" do
+ Parser.parse("a").should == Char.new('a')
+ end
+
+ it "should parse 'b' as a Char" do
+ Parser.parse("b").should == Char.new("b")
+ end
+
+ it "should parse 'ab' as two chars" do
+ Parser.parse("ab").should == (Char.new("a") + Char.new("b"))
+ end
+
+ it "should parse a '.' as a Dot" do
+ Parser.parse(".").should == Dot.new
+ end
+
+ it "should parse a '\.' as a char dot" do
+ Parser.parse('\.').should == Char.new(".")
+ end
+
+ it "should parse '\..' as an escaped char + a dot" do
+ Parser.parse("\\..").should == (Char.new(".") + Dot.new)
+ end
+
+ it "should parse 'a*' as a kleen star" do
+ Parser.parse("a*").should == KleenStar.new(Char.new("a"))
+ end
+
+ it "should parse 'b*' as a kleen star" do
+ Parser.parse("b*").should == KleenStar.new(Char.new("b"))
+ end
+
+ it "should parse '\*' as the star char" do
+ Parser.parse("\\*").should == Char.new("*")
+ end
+
+ it "should parse 'a\*' as a followed by a char" do
+ Parser.parse("a\\*").should == (Char.new("a") + Char.new("*"))
+ end
+
+ it "should parse a? as an optional a" do
+ Parser.parse("a?").should == OptionalSymbol.new(Char.new("a"))
+ end
+
+ it "should parse b? as an optional b" do
+ Parser.parse("b?").should == OptionalSymbol.new(Char.new("b"))
+ end
+
+ it "should parse an escaped question mark as a char" do
+ Parser.parse("\\?").should == Char.new("?")
+ end
+
+ it "should parse a plus" do
+ Parser.parse("a+").should == Plus.new(Char.new("a"))
+ end
+
+ it "should parse 'b+'" do
+ Parser.parse("b+").should == Plus.new(Char.new("b"))
+ end
+
+ it "should parse an escaped plus" do
+ Parser.parse("\\+").should == Char.new("+")
+ end
+
+ it "should parse [a-z] as a character class" do
+ Parser.parse("[a-z]").should == CharacterClass.new("a-z")
+ end
+
+ it "should parse [b-c] as a character class" do
+ Parser.parse("[b-c]").should == CharacterClass.new("b-c")
+ end
+
+ it "should parse \ as an open bracket char" do
+ Parser.parse("\\[").should == Char.new("[")
+ end
+
+ it "should parse \] as a closed bracket char" do
+ Parser.parse("\\]").should == Char.new("]")
+ end
+
+ it "should parse 'ab' as a concatenation of a and b" do
+ char1 = Char.new("a")
+ char2 = Char.new("b")
+
+ Parser.parse("ab").should == Concatenation.new(char1, char2)
+ end
+
+ it "should parse [a-z]* as a kleen star of a char class" do
+ Parser.parse("[a-z]*").should == KleenStar.new(CharacterClass.new("a-z"))
+ end
+
+ it "should parse alternation" do
+ result = Parser.parse("a|b")
+ result.should be_a_kind_of(Alternation)
+ result.should == Alternation.new(Char.new("a"), Char.new("b"))
+ end
+
+ it "should parse correct chars in the alternation" do
+ result = Parser.parse("x|y")
+ result.should be_a_kind_of(Alternation)
+ result.should == Alternation.new(Char.new("x"), Char.new("y"))
+ end
+
+ it "should parse '.|a' as an alternation" do
+ result = Parser.parse(".|a")
+ result.should be_a_kind_of(Alternation)
+ result.should == Alternation.new(Dot.new, Char.new("a"))
+ end
+
+ it "should allow a char class in the second position" do
+ result = Parser.parse(".|[a-z]")
+ result.should be_a_kind_of(Alternation)
+ result.should == Alternation.new(Dot.new, CharacterClass.new("a-z"))
+ result.expressions.last.should be_a_kind_of(CharacterClass)
+ end
+
+ it "should allow a plus after a char class" do
+ result = Parser.parse("[a-z]+")
+ result.should be_a_kind_of(Plus)
+ result.should == Plus.new(CharacterClass.new("a-z"))
+ end
+
+ it "should see an escaped plus as a char" do
+ Parser.parse('\+').should be_a_kind_of(Char)
+ end
+
+ it "should see an escaped plus with a argment in front of it as an escaped plus with a concatenation" do
+ result = Parser.parse('a\+')
+ result.should == Concatenation.new(Char.new("a"), Char.new("+"))
+ end
+
+ it "should allow an optional char class" do
+ result = Parser.parse("[a-z]?")
+ result.should == OptionalSymbol.new(CharacterClass.new("a-z"))
+ end
+
+ it "should parse with parens" do
+ result = Parser.parse("([a-z])")
+ result.should be_a_kind_of(CharacterClass)
+ end
+
+ it "should parse an escaped paren inside parens" do
+ result = Parser.parse("(\\()")
+ result.should == Char.new("(")
+ end
+
+ it "should allow parens around a concatenation" do
+ result = Parser.parse("(ab)")
+ result.should == (Char.new("a") + Char.new("b"))
+ end
+
+ it "should parse matching escaped parens inside a set of parens" do
+ result = Parser.parse '(\(\))'
+ result.should == (Char.new("(") + Char.new(")"))
+ end
+
+ it "should parse two sets of parens around each other" do
+ result = Parser.parse "((ab))"
+ result.should == (Char.new("a") + Char.new("b"))
+ end
+
+ it "should parse a number" do
+ result = Parser.parse("9")
+ result.should == Char.new("9")
+ end
+
+ it "should parse any single non-special char (one that isn't in the regex set)" do
+ result = Parser.parse("$")
+ result.should == Char.new("$")
+ end
+
+ it "should parse an escaped or" do
+ result = Parser.parse('\|')
+ result.should == Char.new("|")
+ end
+
+ it "should parse an underscore" do
+ result = Parser.parse("_")
+ result.should == Char.new("_")
+ end
+
+ it "should parse a char class with one element" do
+ result = Parser.parse("[a]")
+ result.should == Char.new("a")
+ end
+
+ it "should parse an escaped special char inside a character class" do
+ result = Parser.parse('[\+]')
+ result.should be_a_kind_of(Char)
+ result.should == Char.new("+")
+ end
+
+ it "should parse two escaped chars within a char range" do
+ result = Parser.parse '[\a-\b]'
+ result.should be_a_kind_of(CharacterClass)
+ result.should == CharacterClass.new("\\a-\\b")
+ end
+
+ it "should NOT parse an empty char class" do
+ lambda {
+ Parser.parse("[]")
+ }.should raise_error(Parser::ParseError)
+ end
+
+ ["+", "?", "*", "[", "]", "\\", "|"].each do |char|
+ it "should not parse the regex '#{char}'" do
+ lambda {
+ Parser.parse("#{char}")
+ }.should raise_error(Parser::ParseError)
+ end
+ end
+
+ it "should raise an error if it cannot parse a string" do
+ lambda {
+ Parser.parse("[")
+ }.should raise_error(Parser::ParseError, "could not parse the regex '['")
+ end
+
+ it "should use the correct string name" do
+ lambda {
+ Parser.parse("]")
+ }.should raise_error(Parser::ParseError, "could not parse the regex ']'")
+ end
+
+ it "should allow multiple expressions inside a char class (i.e [a-zA-Z])"
+
+ it "should be able to parse multiple ORs (a|b|c)"
+ end
+ end
+end
diff --git a/spec/hopcoft/regex/plus_spec.rb b/spec/hopcoft/regex/plus_spec.rb
new file mode 100644
index 0000000..b1e467e
--- /dev/null
+++ b/spec/hopcoft/regex/plus_spec.rb
@@ -0,0 +1,47 @@
+require File.expand_path(File.dirname(__FILE__) + "/../../spec_helper")
+
+module Hopcroft
+ module Regex
+ describe Plus do
+ it "should take a regex" do
+ s = Plus.new(Char.new("f"))
+ s.expression.should == Char.new("f")
+ end
+
+ describe "matching" do
+ def plus_with_char(str)
+ Plus.new(Char.new(str))
+ end
+
+ it "should not match an empty string" do
+ s = plus_with_char("a")
+ s.matches?("").should be_false
+ end
+
+ it "should match one char" do
+ s = plus_with_char("a")
+ s.matches?("a").should be_true
+ end
+
+ it "should not match a different char" do
+ s = plus_with_char("a")
+ s.matches?("b").should be_false
+ end
+
+ it "should match many of the same chars" do
+ s = plus_with_char("a")
+ s.matches?("aa").should be_true
+ end
+
+ it "should not match when any of the chars are different" do
+ s = plus_with_char("a")
+ s.matches?("aab").should be_false
+ end
+ end
+
+ it "should have the regex string" do
+ Plus.new(Char.new("a")).to_regex_s.should == "a+"
+ end
+ end
+ end
+end
diff --git a/spec/spec.opts b/spec/spec.opts
new file mode 100644
index 0000000..5052887
--- /dev/null
+++ b/spec/spec.opts
@@ -0,0 +1 @@
+--color
\ No newline at end of file
diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb
new file mode 100644
index 0000000..83801f2
--- /dev/null
+++ b/spec/spec_helper.rb
@@ -0,0 +1,4 @@
+
+require "rubygems"
+require File.expand_path(File.dirname(__FILE__) + "/../lib/hopcroft")
+require 'facets/dictionary'
diff --git a/tasks/flog.rake b/tasks/flog.rake
new file mode 100644
index 0000000..e31fd7d
--- /dev/null
+++ b/tasks/flog.rake
@@ -0,0 +1,10 @@
+desc "Feel the pain of my code, and submit a refactoring patch"
+task :flog do
+ puts %x(find lib | grep ".rb$" | xargs flog)
+end
+
+task :flog_to_disk => :create_doc_directory do
+ puts "Flogging..."
+ %x(find lib | grep ".rb$" | xargs flog > doc/flog.txt)
+ puts "Done Flogging...\n"
+end
\ No newline at end of file
diff --git a/tasks/rdoc.rake b/tasks/rdoc.rake
new file mode 100644
index 0000000..6233cc1
--- /dev/null
+++ b/tasks/rdoc.rake
@@ -0,0 +1,16 @@
+require 'rake'
+require 'hanna/rdoctask'
+
+DOC_DIRECTORY = File.dirname(__FILE__) + "/../doc"
+
+Rake::RDocTask.new do |rdoc|
+ rdoc.rdoc_dir = DOC_DIRECTORY
+ rdoc.title = 'Hopcroft'
+ rdoc.options << '--line-numbers' << '--inline-source'
+
+ rdoc.options << '--webcvs=http://github.com/mislav/will_paginate/tree/master/'
+
+ ["README.rdoc", "GPL_LICENSE", "MIT_LICENSE", "lib/**/*.rb"].each do |file|
+ rdoc.rdoc_files.include(file)
+ end
+end
diff --git a/tasks/rspec.rake b/tasks/rspec.rake
new file mode 100644
index 0000000..037b6e5
--- /dev/null
+++ b/tasks/rspec.rake
@@ -0,0 +1,20 @@
+require 'spec/rake/spectask'
+require 'spec/rake/verify_rcov'
+
+desc 'Run the specs'
+Spec::Rake::SpecTask.new do |t|
+ t.warning = false
+ t.spec_opts = ["--color"]
+end
+
+desc "Create the html specdoc"
+Spec::Rake::SpecTask.new(:specdoc => :create_doc_directory) do |t|
+ t.spec_opts = ["--format", "html:doc/specdoc.html"]
+end
+
+desc "Run all examples with RCov"
+Spec::Rake::SpecTask.new(:rcov) do |t|
+ t.rcov = true
+ t.rcov_opts = ['--exclude', 'spec']
+ t.rcov_dir = "doc/rcov"
+end
\ No newline at end of file
diff --git a/tasks/sloc.rake b/tasks/sloc.rake
new file mode 100644
index 0000000..fb7f16a
--- /dev/null
+++ b/tasks/sloc.rake
@@ -0,0 +1,16 @@
+def sloc
+ `sloccount #{File.dirname(__FILE__)}/../lib #{File.dirname(__FILE__)}/../ext`
+end
+
+desc "Output sloccount report. You'll need sloccount installed."
+task :sloc do
+ puts "Counting lines of code"
+ puts sloc
+end
+
+desc "Write sloccount report"
+task :output_sloc => :create_doc_directory do
+ File.open(File.dirname(__FILE__) + "/doc/lines_of_code.txt", "w") do |f|
+ f << sloc
+ end
+end
\ No newline at end of file
diff --git a/tasks/tags.rake b/tasks/tags.rake
new file mode 100644
index 0000000..54c0315
--- /dev/null
+++ b/tasks/tags.rake
@@ -0,0 +1,23 @@
+# Build the TAGS file for Emacs
+# Taken with slight modifications from
+# http://blog.lathi.net/articles/2007/11/07/navigating-your-projects-in-emacs
+#
+# Thanks Jim Weirich
+
+module Emacs
+ module Tags
+ def self.ruby_files
+ @ruby_files ||= FileList['**/*.rb'].exclude("pkg")
+ end
+ end
+end
+
+namespace :tags do
+ task :emacs do
+ puts "Making Emacs TAGS file"
+ sh "ctags -e #{Emacs::Tags.ruby_files}", :verbose => false
+ end
+end
+
+desc "Build the emacs tags file"
+task :tags => ["tags:emacs"]
|
btbytes/ci
|
4c167913a3d05528292ef57033c67af33b88c0c3
|
adding VC Project file to PSOS
|
diff --git a/psos/PSOS.vcproj b/psos/PSOS.vcproj
new file mode 100644
index 0000000..b569245
--- /dev/null
+++ b/psos/PSOS.vcproj
@@ -0,0 +1,212 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="9.00"
+ Name="PSOS"
+ ProjectGUID="{3CE314D3-DF51-428F-B57C-68BD1F33B490}"
+ Keyword="Win32Proj"
+ TargetFrameworkVersion="0"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ OutputDirectory="Debug"
+ IntermediateDirectory="Debug"
+ ConfigurationType="1"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ Optimization="0"
+ PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE;"
+ MinimalRebuild="true"
+ BasicRuntimeChecks="3"
+ RuntimeLibrary="3"
+ UsePrecompiledHeader="0"
+ WarningLevel="3"
+ Detect64BitPortabilityProblems="true"
+ DebugInformationFormat="4"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ LinkIncremental="2"
+ GenerateDebugInformation="true"
+ SubSystem="1"
+ TargetMachine="1"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ OutputDirectory="Release"
+ IntermediateDirectory="Release"
+ ConfigurationType="1"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE;"
+ RuntimeLibrary="2"
+ UsePrecompiledHeader="0"
+ WarningLevel="3"
+ Detect64BitPortabilityProblems="true"
+ DebugInformationFormat="3"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ LinkIncremental="2"
+ GenerateDebugInformation="true"
+ SubSystem="1"
+ OptimizeReferences="2"
+ EnableCOMDATFolding="2"
+ TargetMachine="1"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <Filter
+ Name="Header Files"
+ Filter="h;hpp;hxx;hm;inl;inc;xsd"
+ UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"
+ >
+ <File
+ RelativePath=".\definiti.h"
+ >
+ </File>
+ <File
+ RelativePath=".\headfile.h"
+ >
+ </File>
+ <File
+ RelativePath=".\mem_loc.h"
+ >
+ </File>
+ <File
+ RelativePath=".\psostate.h"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="Resource Files"
+ Filter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx"
+ UniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}"
+ >
+ </Filter>
+ <Filter
+ Name="Source Files"
+ Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"
+ UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"
+ >
+ <File
+ RelativePath=".\main.c"
+ >
+ </File>
+ <File
+ RelativePath=".\mem_loc.c"
+ >
+ </File>
+ <File
+ RelativePath=".\psostate.c"
+ >
+ </File>
+ </Filter>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
|
btbytes/ci
|
0a68a321eef8fdd47ce4293e6a311815f939aff5
|
adding example run file for Genetic Algorithms
|
diff --git a/ga_bin/ga.run b/ga_bin/ga.run
new file mode 100644
index 0000000..b26213c
--- /dev/null
+++ b/ga_bin/ga.run
@@ -0,0 +1,12 @@
+results.out
+10
+4
+10000
+16
+20
+0.75
+0.005
+0.02
+0
+2
+1
|
btbytes/ci
|
6d86e5e0fbaa5868bb2c49fe028e709fd15a86aa
|
adding Fuzzy Evolutionary fuzzy rule system; NOT working on linux yet
|
diff --git a/fu_ga_fs/Makefile b/fu_ga_fs/Makefile
new file mode 100644
index 0000000..1f64fb0
--- /dev/null
+++ b/fu_ga_fs/Makefile
@@ -0,0 +1,3 @@
+all: array.h extern.h headfile.h misc.h popuint.h variablf.h chromint.h fuzzyrul.h memberf.h mystring.h ruleset.h vector.h
+ gcc -Wall -lm array.cpp average.cpp chromint.cpp fl.cpp flga.cpp fuzzyrul.cpp ga.cpp memberf.cpp mystring.cpp popuint.cpp ruleset.cpp variablf.cpp variance.cpp -o fu_ga_fs
+
diff --git a/fu_ga_fs/array.cpp b/fu_ga_fs/array.cpp
new file mode 100644
index 0000000..c675eca
--- /dev/null
+++ b/fu_ga_fs/array.cpp
@@ -0,0 +1,327 @@
+#include "headfile.h"
+#include "array.h"
+
+//constructors
+array::array(int a,int b):
+row(a),col(b) {
+ arr=new float[row*col];
+ assert(arr!=0);
+ for (int i=0;i<row;i++)
+ for (int j=0;j<col;j++)
+ *(arr+i*col+j)=0;
+}
+
+array::array(const array& a):
+row(a.row),col(a.col) {
+ arr = new float[row*col];
+ assert(arr !=0);
+ for (int i=0;i<row;i++)
+ for (int j=0;j<col;j++)
+ *(arr+i*col+j)=*(a.arr+i*(a.col)+j);
+}
+
+//arithmetic operation
+float* array::operator [] (int i) const
+{assert(i>=0 && i<row); return &arr[i*col];}
+
+array&
+array::changeSize(const int& r, const int& c)
+{
+ delete []arr;
+ row = r;
+ col = c;
+ arr = new float[row*col];
+ assert(arr !=0);
+ for (int i=0;i<row;i++)
+ for (int j=0;j<col;j++)
+ *(arr+i*col+j)=0;
+ return *this;
+}
+
+vector<int>
+array::max_index() const
+{
+ vector<int> tmp(2);
+ float max=*(arr)-2;
+ for (int i=0;i<row;i++)
+ for (int j=0;j<col;j++)
+ {
+ if (max<(*(arr+i*col+j)))
+ {
+ max=*(arr+i*col+j);
+ tmp[0]=i;
+ tmp[1]=j;
+ }
+ }
+
+ return tmp;
+}
+
+float
+array::sum() const
+{
+ float tmp=0;
+ for (int i=0;i<row;i++)
+ for (int j=0;j<col;j++)
+ tmp +=*(arr+i*col+j);
+ return tmp;
+}
+
+array&
+array::operator +=(const array& a)
+{
+ if ((row !=a.row)||(col !=a.col))
+ {
+ cerr<<"adding two arrays with different dimention"<<endl;
+ exit(0);
+ }
+ for (int i=0;i<row;i++)
+ for (int j=0;j<col;j++)
+ *(arr+i*col+j) +=a[i][j];
+ return *this;
+}
+
+array&
+array::operator -=(const array& a)
+{
+ if ((row !=a.row)||(col !=a.col))
+ {
+ cerr<<"subtracting two arrays with different dimention"<<endl;
+ exit(0);
+ }
+ for (int i=0;i<row;i++)
+ for (int j=0;j<col;j++)
+ *(arr+i*col+j) -=a[i][j];
+ return *this;
+}
+
+array&
+array::operator *=(const array& a)
+{
+ if (col !=a.row)
+ {
+ cerr<<"multify two nonconformant arrays"<<endl;
+ exit(0);
+ }
+ array newArr=*this;
+ for (int i=0;i<row;i++)
+ for (int j=0;j<a.col;j++)
+ {
+ float sum=0.0;
+ for (int k=0;k<col;k++)
+ sum +=newArr[i][k]*a[k][j];
+ *(arr+i*a.col+j) =sum;
+ }
+ col=a.col;
+ return *this;
+}
+
+array&
+array::operator *=(const float& a)
+{
+ for (int i=0;i<row;i++)
+ for (int j=0;j<col;j++)
+ *(arr+i*col+j) *=a;
+ return *this;
+}
+
+array&
+array::operator =(const array& a)
+{
+ if ((&a)==this) return *this;
+ delete []arr;
+ row=a.row;
+ col=a.col;
+ arr = new float [row*col];
+ assert(arr !=0);
+ for (int i=0;i<row;i++)
+ for (int j=0;j<col;j++)
+ *(arr+i*col+j)=a[i][j];
+ return *this;
+}
+
+array&
+array::operator =(const float& a)
+{
+ for (int i=0;i<row;i++)
+ for (int j=0;j<col;j++)
+ *(arr+i*col+j)=a;
+ return *this;
+}
+
+array&
+array::operator =(const vector<float>& a)
+{
+ if ((col !=1)||(row !=a.len()))
+ {
+ cerr<<"array in-compabaility with vector"<<endl;
+ exit(0);
+ }
+ for (int i=0;i<row;i++)
+ *(arr+i)=a[i];
+ return *this;
+}
+
+array
+array::noise(const float& a,const float& b) const
+{
+ time_t t;
+ srand((unsigned) time(&t));
+
+ array newArr(row,col);
+ for (int i=0;i<row;i++)
+ for (int j=0;j<col;j++)
+ {
+ float prob=(1.0*random(1000))/1000;
+ newArr[i][j] +=a*prob + b;
+ }
+ return newArr;
+}
+
+//transpose
+array
+array::t() const
+{
+ array newArr(col,row);
+ for (int i=0;i<newArr.row;i++)
+ for (int j=0;j<newArr.col;j++)
+ newArr[i][j]=*(arr+j*col+i);
+ return newArr;
+}
+
+//element-wise square
+array
+array::square() const
+{
+ array newArr(row,col);
+ for (int i=0;i<row;i++)
+ for (int j=0;j<col;j++)
+ newArr[i][j]=(*(arr+i*col+j))*(*(arr+i*col+j));
+ return newArr;
+}
+
+
+//X.map(f) returns element by element mapping f(X)
+array
+array::map(float (*f) (float)) const
+{
+ array newArr(row,col);
+ for (int i=0;i<row;i++)
+ for (int j=0;j<col;j++)
+ newArr[i][j]=f((*(arr+i*col+j)));
+ return newArr;
+}
+
+array operator % (const array& a,const array& b)
+{
+ if ((a.row !=b.row)||(a.col !=b.col))
+ {
+ cerr<<"inner product of non-compabability arrays"<<endl;
+ exit(-1);
+ }
+ array newArr(a.row,a.col);
+ for (int i=0;i<a.row;i++)
+ for (int j=0;j<a.col;j++)
+ newArr[i][j]=a[i][j]*b[i][j];
+ return newArr;
+}
+
+array operator * (const float& a, const array& b)
+{
+ array newArr(b.row,b.col);
+ for (int i=0;i<b.row;i++)
+ for (int j=0;j<b.col;j++)
+ newArr[i][j]=a*b[i][j];
+ return newArr;
+}
+
+array operator * (const array& a, const float& b)
+{
+ array newArr(a.row,a.col);
+ for (int i=0;i<a.row;i++)
+ for (int j=0;j<a.col;j++)
+ newArr[i][j]=a[i][j]*b;
+ return newArr;
+}
+
+array operator * (const array& a, const array& b)
+{
+ if (a.col !=b.row)
+ {
+ cerr<<"multify two arrays nonconformant"<<endl;
+ exit(0);
+ }
+ array newArr(a.row,b.col);
+ for (int i=0;i<a.row;i++)
+ for (int j=0;j<b.col;j++)
+ for (int k=0;k<a.col;k++)
+ newArr[i][j] +=a[i][k]*b[k][j];
+ return newArr;
+}
+
+array operator + (const array& a, const array& b)
+{
+ if ((a.row !=b.row)||(a.col !=b.col))
+ {
+ cerr<<"adding two arrays with different dimensions"<<endl;
+ exit(0);
+ }
+ array newArr(a.row,a.col);
+ for (int i=0;i<a.row;i++)
+ for (int j=0;j<a.col;j++)
+ newArr[i][j] = a[i][j]+b[i][j];
+ return newArr;
+}
+
+array operator - (const array& a, const array& b)
+{
+ if ((a.row !=b.row)||(a.col !=b.col))
+ {
+ cerr<<"adding two arrays with different dimensions"<<endl;
+ exit(0);
+ }
+ array newArr(a.row,a.col);
+ for (int i=0;i<a.row;i++)
+ for (int j=0;j<a.col;j++)
+ newArr[i][j] = a[i][j]-b[i][j];
+ return newArr;
+}
+
+array operator - (const float& a,const array& b)
+{
+ array newArr(b.row,b.col);
+ for (int i=0;i<b.row;i++)
+ for (int j=0;j<b.col;j++)
+ newArr[i][j]=a-b[i][j];
+ return newArr;
+}
+
+array operator - (const array& a,const float& b)
+{
+ array newArr(a.row,a.col);
+ for (int i=0;i<a.row;i++)
+ for (int j=0;j<a.col;j++)
+ newArr[i][j]=a[i][j]-b;
+ return newArr;
+}
+
+istream& operator >> (istream& is,array& a)
+{
+ for (int i=0;i<a.row;i++)
+ for (int j=0;j<a.col;j++)
+ is >> a[i][j];
+ return is;
+}
+
+ostream& operator << (ostream& os,const array& a)
+{
+ for (int i=0;i<a.row;i++)
+ {
+ for (int j=0;j<a.col;j++)
+ os << a[i][j]<<"\t";
+ os<<endl;
+ }
+ os << endl;
+ return os;
+}
+
diff --git a/fu_ga_fs/array.h b/fu_ga_fs/array.h
new file mode 100644
index 0000000..26f86e8
--- /dev/null
+++ b/fu_ga_fs/array.h
@@ -0,0 +1,60 @@
+#ifndef __ARRAY_H__
+#define __ARRAY_H__
+
+#include "vector.h"
+
+class array
+{
+private:
+ int row; //length of array (column);
+ int col; //wide of array (row);
+ float* arr; //pointer to the array;
+
+public:
+ array():row(0),col(0),arr(0) {}
+ array(int a,int b);
+ array(const array& a);
+ ~array(){delete []arr;}
+
+ array& operator =(const array& a);
+ array& operator =(const float& a);
+ array& operator =(const vector<float>& a);
+ float* operator [] (int i) const;
+
+ array& changeSize(const int& r,const int& c);
+ int len() const {return row;}
+ int wid() const {return col;}
+ float* poi() const {return arr;}
+ //find the index of maximum element
+ vector<int> max_index() const; //vec[0]:row, vec[1]:col
+ //sum of all elements
+ float sum() const;
+ //add noise
+ array noise(const float& a,const float& b) const;
+ //element-wise square
+ array square() const;
+ //transpose
+ array t() const;
+
+ //arithmetic operation
+ array& operator +=(const array& a);
+ array& operator -=(const array& a);
+ array& operator *=(const array& a);
+ array& operator *=(const float& a);
+
+ //X.map(f) returns element by element mapping f(X)
+ array map(float (*f)(float)) const;
+
+ friend array operator * (const float& a, const array& b);
+ friend array operator * (const array& a, const float& b);
+ friend array operator * (const array& a, const array& b);
+ friend array operator % (const array& a,const array& b);
+ friend array operator + (const array& a, const array& b);
+ friend array operator - (const array& a, const array& b);
+ friend array operator - (const float& a,const array& b);
+ friend array operator - (const array& a, const float& b);
+ friend istream& operator >> (istream& is,array& a);
+ friend ostream& operator << (ostream& os,const array& a);
+};
+
+#endif // __ARRAY_H__
diff --git a/fu_ga_fs/average.cpp b/fu_ga_fs/average.cpp
new file mode 100644
index 0000000..4997227
--- /dev/null
+++ b/fu_ga_fs/average.cpp
@@ -0,0 +1,11 @@
+#include "vector.h"
+
+float
+average(vector<float> a)
+{
+ float aver=0.0;
+ for (int i=0;i<a.len();i++)
+ aver +=a[i];
+ aver =aver/a.len();
+ return(aver);
+}
diff --git a/fu_ga_fs/chromint.cpp b/fu_ga_fs/chromint.cpp
new file mode 100644
index 0000000..e020940
--- /dev/null
+++ b/fu_ga_fs/chromint.cpp
@@ -0,0 +1,965 @@
+#include "headfile.h"
+#include "chromint.h"
+
+//constructors
+IndividualInt::IndividualInt(int a,float b):
+length(a),m_rate(b)
+{
+ ptr=new int[length];
+ assert(ptr !=0);
+}
+
+IndividualInt::IndividualInt(int a,int* b,float c):
+length(a),m_rate(c)
+{
+ ptr=new int[length];
+ assert(ptr !=0);
+ for (int i=0;i<length;i++)
+ ptr[i]=b[i];
+}
+
+IndividualInt::IndividualInt(vector<int> a, float b)
+{
+ length=a.len();
+ m_rate=b;
+ ptr =new int[length];
+ assert(ptr !=0);
+ for (int i=0;i<length;i++)
+ ptr[i]=a[i];
+}
+
+IndividualInt::IndividualInt(const IndividualInt& a):
+length(a.length),m_rate(a.m_rate)
+{
+ ptr=new int[length];
+ assert(ptr !=0);
+ for (int i=0;i<length;i++)
+ ptr[i]=a.ptr[i];
+}
+
+//member functions
+IndividualInt&
+IndividualInt::change_length(const int& a)
+{
+ length=a;
+ return *this;
+}
+
+IndividualInt&
+IndividualInt::change_mrate(const float& a)
+{
+ m_rate=a;
+ return *this;
+}
+
+IndividualInt&
+IndividualInt::initialize(const int& a,const int& b)
+{
+ for (int i=0;i<length;i++)
+ ptr[i] =random(a+1) + b;
+ return *this;
+}
+
+IndividualInt&
+IndividualInt::initialize_range(const IndividualInt& a)
+{
+ if (((length-1)%(a.length-1)) !=0)
+ {
+ cerr<<"in initialize, length of individual is not exact times of length of range individual"<<endl;
+ cout<<" length="<<length<<" a.length="<<a.length<<endl;
+ exit(1);
+ }
+ int mui=(length-1)/(a.length-1);
+ ptr[0]=random(a.ptr[0])+1;
+ for (int i=0;i<mui;i++)
+ for (int j=1;j<a.length;j++)
+ ptr[i*(a.length-1)+j]=random(2*a.ptr[j]+1)-a.ptr[j];
+ return *this;
+}
+
+IndividualInt&
+IndividualInt::initialize_range_RM(const IndividualInt& a)
+{
+ ptr[0]=random(a.ptr[0])+1; //rule number
+ int tmplen=1;
+ for (int i=2;i<a.length;i++)
+ tmplen +=2*a[i];
+ if (((length-tmplen)%(a.length-2)) !=0)
+ {
+ cerr<<"in initialize, length of individual is not exact times of length of range individual"<<endl;
+ cout<<" length="<<length<<" a.length="<<a.length<<endl;
+ exit(1);
+ }
+ //initialize membership part
+ for (i=1;i<tmplen;i++)
+ ptr[i]=random(a.ptr[1]);
+ //initialize rule part
+ int mui=(length-tmplen)/(a.length-2);
+ for (i=0;i<mui;i++)
+ for (int j=2;j<a.length;j++)
+ ptr[tmplen+i*(a.length-2)+j-2]=random(2*a.ptr[j]+1)-a.ptr[j];
+ return *this;
+}
+
+IndividualInt&
+IndividualInt::initialize_range_RMT(const IndividualInt& a)
+{
+ ptr[0]=random(a.ptr[0])+1; //rule number
+ int tmplen=1;
+ for (int i=3;i<a.length;i++)
+ tmplen +=3*a[i]; //each funtion having two points and one type
+ if (((length-tmplen)%(a.length-3)) !=0)
+ {
+ cerr<<"in initialize, length of individual is not exact times of length of range individual"<<endl;
+ cout<<" length="<<length<<" a.length="<<a.length<<endl;
+ exit(1);
+ }
+ //initialize membership part
+ int inde=3;
+ int ch=0;
+ for (i=1;i<tmplen;i++)
+ {
+ if ((i%3)==0)
+ {
+ ch++;
+ ptr[i]=random(a.ptr[2])+1; //1 to a.ptr[2]
+ if (ch==1)
+ {
+ if ((ptr[i]==2)||(ptr[i]==4))
+ ptr[i]++;
+ }
+ else if ((ch>1)&&(ch<a[inde]))
+ {
+ if (ptr[i]<=3)
+ ptr[i]=3;
+ else
+ ptr[i]=6;
+ }
+ else
+ {
+ inde++;
+ ch=0;
+ if ((ptr[i]==1)||(ptr[i]==5))
+ ptr[i]++;
+ }
+ }
+ else
+ ptr[i]=random(a.ptr[1]);
+ }
+ //initialize rule part
+ int mui=(length-tmplen)/(a.length-3);
+ for (i=0;i<mui;i++)
+ for (int j=3;j<a.length;j++)
+ ptr[tmplen+i*(a.length-3)+j-3]=random(2*a.ptr[j]+1)-a.ptr[j];
+ return *this;
+}
+
+
+IndividualInt&
+IndividualInt::mutate_one(const IndividualInt& a)
+{
+ //mutate rule number part
+ float prob=(1.0*random(1000))/1000;
+ if (prob<m_rate)
+ {
+ prob=(1.0*random(1000))/1000;
+ if (prob>0.5)
+ {
+ //add one
+ if (ptr[0]<a[0])
+ ptr[0]++;
+ else
+ ptr[0]=1;
+ }
+ else
+ {
+ //subtract one
+ if (ptr[0]>1)
+ ptr[0]--;
+ else
+ ptr[0]=a[0];
+ }
+ }
+ // mutate rule part
+ for (int i=1;i<length;i++)
+ {
+ prob=(1.0*random(1000))/1000;
+ if (prob<m_rate)
+ {
+ int tmp=(i-1)%(a.length-1);
+ prob=(1.0*random(1000))/1000;
+ if (prob>0.5)
+ {
+ //add one
+ if (ptr[i]<a[tmp+1])
+ ptr[i]++;
+ else
+ ptr[i]=-1*a[tmp+1];
+ }
+ else
+ {
+ //subtract one
+ if (ptr[i]>(-1*a[tmp+1]))
+ ptr[i]--;
+ else
+ ptr[i]=a[tmp+1];
+ }
+ }
+ }
+ return *this;
+}
+
+IndividualInt&
+IndividualInt::mutate_one_RM(const IndividualInt& a)
+{
+ //mutate rule number part
+ float prob=(1.0*random(1000))/1000;
+ if (prob<m_rate)
+ {
+ prob=(1.0*random(1000))/1000;
+ if (prob>0.5)
+ {
+ //add one
+ if (ptr[0]<a[0])
+ ptr[0]++;
+ else
+ ptr[0]=1;
+ }
+ else
+ {
+ //subtract one
+ if (ptr[0]>1)
+ ptr[0]--;
+ else
+ ptr[0]=a[0];
+ }
+ }
+ //mutate membership functions part
+ int tmplen=1;
+ for (int i=2;i<a.length;i++)
+ tmplen +=2*a[i];
+ for (i=1;i<tmplen;i++)
+ {
+ prob=(1.0*random(1000))/1000;
+ if (prob<m_rate)
+ {
+ prob=(1.0*random(1000))/1000;
+ if (prob>0.5)
+ {
+ //add one
+ ptr[i]++;
+ if (ptr[i]>=a[1])
+ ptr[i]=0;
+ }
+ }
+ else
+ {
+ //subtract one
+ if (ptr[i]>0)
+ ptr[i]--;
+ else
+ ptr[i]=a[1]-1;
+ }
+ }
+ // mutate rule part
+ for (i=tmplen;i<length;i++)
+ {
+ prob=(1.0*random(1000))/1000;
+ if (prob<m_rate)
+ {
+ int tmp=(i-tmplen)%(a.length-2);
+ prob=(1.0*random(1000))/1000;
+ if (prob>0.5)
+ {
+ //add one
+ if (ptr[i]<a[tmp+2])
+ ptr[i]++;
+ else
+ ptr[i]=-1*a[tmp+2];
+ }
+ else
+ {
+ //subtract one
+ if (ptr[i]>(-1*a[tmp+2]))
+ ptr[i]--;
+ else
+ ptr[i]=a[tmp+2];
+ }
+ }
+ }
+ return *this;
+}
+
+IndividualInt&
+IndividualInt::mutate_one_RMT(const IndividualInt& a)
+{
+ //mutate rule number part
+ float prob=(1.0*random(1000))/1000;
+ if (prob<m_rate)
+ {
+ prob=(1.0*random(1000))/1000;
+ if (prob>0.5)
+ {
+ //add one
+ if (ptr[0]<a[0])
+ ptr[0]++;
+ else
+ ptr[0]=1;
+ }
+ else
+ {
+ //subtract one
+ if (ptr[0]>1)
+ ptr[0]--;
+ else
+ ptr[0]=a[0];
+ }
+ }
+ //mutate membership functions part
+ int tmplen=1;
+ for (int i=3;i<a.length;i++)
+ tmplen +=3*a[i];
+ int ch=0;
+ int inde=3;
+ for (i=1;i<tmplen;i++)
+ {
+ prob=(1.0*random(1000))/1000;
+ if (prob<m_rate)
+ {
+ prob=(1.0*random(1000))/1000;
+ if (prob>0.5)
+ {
+ //add one
+ ptr[i]++;
+ if ((i%3)==0) //mutate type
+ {
+ ch++;
+ if (ch==1)
+ {
+ if (ptr[i]>a[2])
+ ptr[i]=1;
+ if ((ptr[i]==2)||(ptr[i]==4))
+ ptr[i]++;
+ }
+ else if ((ch>1)&&(ch<a[inde]))
+ {
+ if (ptr[i]>a[2])
+ ptr[i]=3;
+ if ((ptr[i]==1)||(ptr[i]==2))
+ ptr[i]=3;
+ if ((ptr[i]==4)||(ptr[i]==5))
+ ptr[i]=6;
+ }
+ else if (ch==a[inde])
+ {
+ inde++;
+ ch=0;
+ if (ptr[i]>a[2])
+ ptr[i]=2;
+ if ((ptr[i]==1)||(ptr[i]==5))
+ ptr[i]++;
+ }
+ }
+ else //tune membership function
+ {
+ if (ptr[i]>=a[1])
+ ptr[i]=0;
+ }
+ }
+ else
+ {
+ //subtract one
+ ptr[i]--;
+ if ((i%3)==0) //mutate type
+ {
+ ch++;
+ if (ch==1)
+ {
+ if (ptr[i]<1)
+ ptr[i]=a[2];
+ if ((ptr[i]==2)||(ptr[i]==4))
+ ptr[i]--;
+ }
+ else if ((ch>1)&&(ch<a[inde]))
+ {
+ if (ptr[i]<1)
+ ptr[i]=6;
+ if ((ptr[i]==1)||(ptr[i]==2))
+ ptr[i]=6;
+ if ((ptr[i]==4)||(ptr[i]==5))
+ ptr[i]=3;
+ }
+ else if (ch==a[inde])
+ {
+ inde++;
+ ch=0;
+ if ((ptr[i]==1)||(ptr[i]==5))
+ ptr[i]--;
+ if (ptr[i]<1)
+ ptr[i]=6;
+ }
+ }
+ else //tune membership function
+ {
+ if (ptr[i]<0)
+ ptr[i]=a[1]-1;
+ }
+ }
+ }
+ else
+ {
+ if ((i%3)==0)
+ {
+ ch++;
+ if (ch==a[inde])
+ {
+ ch=0;
+ inde++;
+ }
+ }
+ }
+ }
+ // mutate rule part
+ for (i=tmplen;i<length;i++)
+ {
+ prob=(1.0*random(1000))/1000;
+ if (prob<m_rate)
+ {
+ int tmp=(i-tmplen)%(a.length-3);
+ prob=(1.0*random(1000))/1000;
+ if (prob>0.5)
+ {
+ //add one
+ if (ptr[i]<a[tmp+3])
+ ptr[i]++;
+ else
+ ptr[i]=-1*a[tmp+3];
+ }
+ else
+ {
+ //subtract one
+ if (ptr[i]>(-1*a[tmp+3]))
+ ptr[i]--;
+ else
+ ptr[i]=a[tmp+3];
+ }
+ }
+ }
+ return *this;
+}
+
+void crossoverOP(IndividualInt& a,IndividualInt& b)
+{
+ if (a.length != b.length)
+ {
+ cerr<<"try to crossover two different size of individuals with OP"<<endl;
+ exit(1);
+ }
+ time_t t;
+ srand((unsigned) time(&t));
+
+ int Fir=random(a.length);
+ /* crossover through exchanging information */
+ for (int i=Fir; i<a.length;i++)
+ {
+ int Tmp=a[i];
+ a.ptr[i]=b.ptr[i];
+ b.ptr[i]=Tmp;
+ }/* end for */
+}
+
+void crossoverTP(IndividualInt& a,IndividualInt& b)
+{
+ if (a.length != b.length)
+ {
+ cerr<<"try to crossover two different size of individuals with OP"<<endl;
+ exit(1);
+ }
+ time_t t;
+ srand((unsigned) time(&t));
+ int Fir=random(a.length); /* choose the first crossover point */
+
+ int Sec=random(a.length); /* choose the second crossover point */
+ Sec=(2*Sec)%a.length;
+ /* sort crossover points */
+ int tmp;
+ if (Fir>=Sec)
+ {
+ tmp=Fir;
+ Fir=Sec;
+ Sec=tmp;
+ }
+ /* crossover through exchanging information */
+ for (int i=Fir; i<Sec;i++)
+ {
+ int Tmp=a.ptr[i];
+ a.ptr[i]=b.ptr[i];
+ b.ptr[i]=Tmp;
+ }/* end for */
+}
+
+void crossoverUniform(IndividualInt& a,IndividualInt& b)
+{
+ if (a.length != b.length)
+ {
+ cerr<<"try to crossover two different size of individuals with OP"<<endl;
+ exit(1);
+ }
+ time_t t;
+ srand((unsigned) time(&t));
+ for (int i=0; i<a.length;i++)
+ {
+ float prob=(1.0*random(1000))/1000;
+ if (prob>0.5)
+ {
+ int Tmp=a.ptr[i];
+ a.ptr[i]=b.ptr[i];
+ b.ptr[i]=Tmp;
+ }
+ }/* end for */
+}
+
+FuzzyRuleSet
+IndividualInt::formRuleSet(const FuzzyRule& a) const
+{
+ int tmp=a.get_variableSize()+a.get_outputSize();
+ if (((length-1)%tmp)!=0)
+ {
+ cerr<<"in chromint.cpp length of individual is not exact times of length";
+ cout<<" of range individual in formRuleSet"<<endl;
+ cerr<<"length="<<length<<" tmp="<<tmp<<endl;
+ exit(1);
+ }
+ FuzzyRuleSet ruleSet;
+ vector<int> invec(a.get_variableSize());
+ vector<int> outvec(a.get_outputSize());
+ //get zero input and output vector
+ vector<int> inzero(a.get_variableSize());
+ vector<int> outzero(a.get_outputSize());
+ for (int i=0;i<a.get_variableSize();i++)
+ inzero[i]=0;
+ for (i=0;i<a.get_outputSize();i++)
+ outzero[i]=0;
+ FuzzyRule tmprule;
+ //add feasible rules
+ for (i=0;i<ptr[0];i++) //ptr[0] store the maximum rule no.
+ {
+ for (int j=0;j<a.get_variableSize();j++)
+ invec[j]=ptr[i*tmp+j+1];
+ for (j=0;j<a.get_outputSize();j++)
+ outvec[j]=ptr[i*tmp+a.get_variableSize()+j+1];
+ if ((invec !=inzero)&&(outvec !=outzero))
+ {
+ tmprule=a;
+ tmprule.change_inputSetFlag(invec);
+ tmprule.change_outputSetFlag(outvec);
+ tmprule.form_ruleContent();
+ ruleSet.addRule(tmprule);
+ }
+ }
+ return ruleSet;
+}
+
+FuzzyRuleSet
+IndividualInt::formRuleSet_RM(const FuzzyRule& a,const IndividualInt& b) const
+{
+ int tmpin=a.get_variableSize();
+ int tmpout=a.get_outputSize();
+ int tmp=tmpin+tmpout;
+ int tmplen=1;
+ for (int i=2;i<b.length;i++)
+ tmplen +=2*b[i]; //length for tuning membership functions
+ if ((((length-tmplen)%(b.length-2)) !=0)||(tmp!=(b.length-2)))
+ {
+ cerr<<"in formRuleSet_RM, chromint.cpp, length of individual is not exact times of length of range individual"<<endl;
+ cerr<<"length="<<length<<" tmp="<<tmp<<endl;
+ exit(1);
+ }
+ FuzzyRuleSet ruleSet;
+ FuzzyRule tmpruleA;
+ //tuning membership function of rule a
+ int iter=0;
+ tmpruleA=a;
+ //input variables
+ for (i=0;i<tmpin;i++)
+ {
+ float var_startPoint=a[i].get_startPoint();
+ float var_endPoint=a[i].get_endPoint();
+ float var_length=var_endPoint-var_startPoint;
+ float stepSize=var_length/(b[i+2]+1);
+ float set_startPoint;
+ float set_endPoint;
+ for (int j=0;j<b[i+2];j++) //variable i has b[i+2] fuzzy sets
+ {
+ //get the start point
+ iter++;
+ if (j==0)
+ set_startPoint=(j+1)*stepSize - (((b[1]+ptr[iter])*stepSize)/(2.0*b[1]))+var_startPoint;
+ else
+ set_startPoint=(j+1)*stepSize - (((ptr[iter]+b[1]/2)*stepSize)/b[1])+var_startPoint;
+ //get the end point
+ iter++;
+ if (j==(b[i+2]-1))
+ set_endPoint=(j+1)*stepSize + (((b[1]+ptr[iter])*stepSize)/(2.0*b[1])) +var_startPoint;
+ else
+ set_endPoint=(j+1)*stepSize + (((ptr[iter]+b[1]/2)*stepSize)/b[1]) +var_startPoint;
+ //change startPoint and endPoint
+ assert((set_endPoint-set_startPoint)>0);
+ tmpruleA[i][j].change_startPoint(set_startPoint);
+ tmpruleA[i][j].change_endPoint(set_endPoint);
+ }
+ }
+ //output variable
+ for (i=0;i<tmpout;i++)
+ {
+ float var_startPoint=a.get_outputVariable(i).get_startPoint();
+ float var_endPoint=a.get_outputVariable(i).get_endPoint();
+ float var_length=var_endPoint-var_startPoint;
+ float stepSize=var_length/(b[i+2+tmpin]+1);
+ for (int j=0;j<b[i+2+tmpin];j++) //variable i has b[i+2+tmpin] fuzzy sets
+ {
+ //get the start point
+ iter++;
+ float set_startPoint=(j+1)*stepSize - (((1+ptr[iter])*stepSize)/b[1])+var_startPoint;
+ //get the end point
+ iter++;
+ float set_endPoint=(j+1)*stepSize + (((1+ptr[iter])*stepSize)/b[1])+var_startPoint;
+ //change startPoint and endPoint
+ assert((set_endPoint-set_startPoint)>0);
+ tmpruleA.get_outputVariable(i)[j].change_startPoint(set_startPoint);
+ tmpruleA.get_outputVariable(i)[j].change_endPoint(set_endPoint);
+ }
+ }
+ //get rule set
+ vector<int> invec(tmpin);
+ vector<int> outvec(tmpout);
+ //get zero input and output vector
+ vector<int> inzero(tmpin);
+ vector<int> outzero(tmpout);
+ for (i=0;i<tmpin;i++)
+ inzero[i]=0;
+ for (i=0;i<tmpout;i++)
+ outzero[i]=0;
+ FuzzyRule tmprule;
+ //add feasible rules
+ for (i=0;i<ptr[0];i++) //ptr[0] store the maximum rule no.
+ {
+ for (int j=0;j<tmpin;j++)
+ invec[j]=ptr[i*tmp+j+iter+1];
+ for (j=0;j<tmpout;j++)
+ outvec[j]=ptr[i*tmp+tmpin+j+iter+1];
+ if ((invec !=inzero)&&(outvec !=outzero))
+ {
+ tmprule=tmpruleA;
+ tmprule.change_inputSetFlag(invec);
+ tmprule.change_outputSetFlag(outvec);
+ tmprule.form_ruleContent();
+ ruleSet.addRule(tmprule);
+ }
+ }
+ return ruleSet;
+}
+
+FuzzyRuleSet
+IndividualInt::formRuleSet_RMT(const FuzzyRule& a,const IndividualInt& b) const
+{
+ int tmpin=a.get_variableSize();
+ int tmpout=a.get_outputSize();
+ int tmp=tmpin+tmpout;
+ int tmplen=1;
+ for (int i=3;i<b.length;i++)
+ tmplen +=3*b[i]; //length for tuning membership functions ans type
+ if ((((length-tmplen)%(b.length-3)) !=0)||(tmp!=(b.length-3)))
+ {
+ cerr<<"in formRuleSet_RMT, chromint.cpp, length of individual is not exact times of length of range individual"<<endl;
+ cerr<<"length="<<length<<" tmp="<<tmp<<endl;
+ exit(1);
+ }
+ FuzzyRuleSet ruleSet;
+ FuzzyRule tmpruleA;
+ //tuning membership function of rule a
+ int iter=0;
+ tmpruleA=a;
+ //input variables
+ for (i=0;i<tmpin;i++)
+ {
+ float var_startPoint=a[i].get_startPoint();
+ float var_endPoint=a[i].get_endPoint();
+ float var_length=var_endPoint-var_startPoint;
+ float stepSize=var_length/(b[i+3]+1);
+ float set_startPoint;
+ float set_endPoint;
+ for (int j=0;j<b[i+3];j++) //variable i has b[i+3] fuzzy sets
+ {
+ //get the start point
+ iter++;
+ if (j==0)
+ set_startPoint=(j+1)*stepSize - (((b[1]+ptr[iter])*stepSize)/(2.0*b[1]))+var_startPoint;
+ else
+ set_startPoint=(j+1)*stepSize - (((ptr[iter]+b[1]/2)*stepSize)/b[1])+var_startPoint;
+ //get the end point
+ iter++;
+ if (j==(b[i+3]-1))
+ set_endPoint=(j+1)*stepSize + (((b[1]+ptr[iter])*stepSize)/(2.0*b[1])) +var_startPoint;
+ else
+ set_endPoint=(j+1)*stepSize + (((ptr[iter]+b[1]/2)*stepSize)/b[1]) +var_startPoint;
+ //get function type
+ iter++;
+ int typeF=ptr[iter];
+ //change startPoint and endPoint
+ assert((set_endPoint-set_startPoint)>0);
+ tmpruleA[i][j].change_startPoint(set_startPoint);
+ tmpruleA[i][j].change_endPoint(set_endPoint);
+ tmpruleA[i][j].change_functionType(typeF);
+ }
+ }
+ //output variable
+ for (i=0;i<tmpout;i++)
+ {
+ float var_startPoint=a.get_outputVariable(i).get_startPoint();
+ float var_endPoint=a.get_outputVariable(i).get_endPoint();
+ float var_length=var_endPoint-var_startPoint;
+ float stepSize=var_length/(b[i+3+tmpin]+1);
+ for (int j=0;j<b[i+3+tmpin];j++) //variable i has b[i+3+tmpin] fuzzy sets
+ {
+ //get the start point
+ iter++;
+ float set_startPoint=(j+1)*stepSize - (((1+ptr[iter])*stepSize)/b[1])+var_startPoint;
+ //get the end point
+ iter++;
+ float set_endPoint=(j+1)*stepSize + (((1+ptr[iter])*stepSize)/b[1])+var_startPoint;
+ //get function type
+ iter++;
+ int typeF=ptr[iter];
+ //change startPoint and endPoint
+ assert((set_endPoint-set_startPoint)>0);
+ tmpruleA.get_outputVariable(i)[j].change_startPoint(set_startPoint);
+ tmpruleA.get_outputVariable(i)[j].change_endPoint(set_endPoint);
+ tmpruleA.get_outputVariable(i)[j].change_functionType(typeF);
+ }
+ }
+ //get rule set
+ vector<int> invec(tmpin);
+ vector<int> outvec(tmpout);
+ //get zero input and output vector
+ vector<int> inzero(tmpin);
+ vector<int> outzero(tmpout);
+ for (i=0;i<tmpin;i++)
+ inzero[i]=0;
+ for (i=0;i<tmpout;i++)
+ outzero[i]=0;
+ FuzzyRule tmprule;
+ //add feasible rules
+ for (i=0;i<ptr[0];i++) //ptr[0] store the maximum rule no.
+ {
+ for (int j=0;j<tmpin;j++)
+ invec[j]=ptr[i*tmp+j+iter+1];
+ for (j=0;j<tmpout;j++)
+ outvec[j]=ptr[i*tmp+tmpin+j+iter+1];
+ if ((invec !=inzero)&&(outvec !=outzero))
+ {
+ tmprule=tmpruleA;
+ tmprule.change_inputSetFlag(invec);
+ tmprule.change_outputSetFlag(outvec);
+ tmprule.form_ruleContent();
+ ruleSet.addRule(tmprule);
+ }
+ }
+ return ruleSet;
+}
+
+float
+IndividualInt::fitness(const FuzzyRule& a, const array& b,const vector<int>& cn,const int&c, const int& d,const int& e) const
+{
+ int outVarDim=a.get_outputSize();
+ int inVarDim=a.get_variableSize();
+ int tmpint=inVarDim+outVarDim;
+ int arrayLen=b.len();
+ if (tmpint !=b.wid())
+ {
+ cerr<<"dim of input is not exact number of variables in fitness"<<endl;
+ exit(1);
+ }
+ //get rule set
+ FuzzyRuleSet ruleSet=this->formRuleSet(a);
+ if (ruleSet.get_ruleSetSize()==0) return 0;
+ vector<float> invec(inVarDim);
+ vector<int> outvec(outVarDim);
+ //begin classify
+ int misclassify=0;
+ vector<int> cla(outVarDim);
+ vector<float> tmp(outVarDim);
+ for (int i=0;i<arrayLen;i++)
+ {
+ //get input vector for the ith row
+ for (int j=0;j<inVarDim;j++)
+ invec[j]=b[i][j];
+ for (j=0;j<outVarDim;j++)
+ outvec[j]=(int)(b[i][inVarDim+j]+0.001);
+ if (ruleSet.checkRuleSetFired(invec)==1)
+ {
+ tmp=ruleSet.output(invec,c,d,e); //ruleEffectFlag, fuzzyFlag, defuzzyFlag
+ //get output class
+ for (j=0;j<outVarDim;j++)
+ cla[j]=(int)(tmp[j]*cn[j]);
+ //output data dim equal to outputVariable dim
+ if (cla !=outvec)
+ misclassify++;
+ }
+ else
+ misclassify++;
+ }
+ float result=(1.0*(arrayLen-misclassify))/b.len();
+ return result ;
+}
+
+float
+IndividualInt::fitness_RM(const FuzzyRule& a, const array& b,const vector<int>& cn,const int&c, const int& d,const int& e,const IndividualInt& f) const
+{
+ int outVarDim=a.get_outputSize();
+ int inVarDim=a.get_variableSize();
+ int tmpint=inVarDim+outVarDim;
+ int arrayLen=b.len();
+ if (tmpint !=b.wid())
+ {
+ cerr<<"dimension of iuput is not exact number of variables in fitness_RM"<<endl;
+ exit(1);
+ }
+ //get rule set
+ FuzzyRuleSet ruleSet=this->formRuleSet_RM(a,f);
+ if (ruleSet.get_ruleSetSize()==0)
+ return 0;
+ vector<float> invec(inVarDim);
+ vector<int> outvec(outVarDim);
+ //begin classify
+ int misclassify=0;
+ vector<int> cla(outVarDim);
+ vector<float> tmp(outVarDim);
+ for (int i=0;i<arrayLen;i++)
+ {
+ //get input vector for the ith row
+ for (int j=0;j<inVarDim;j++)
+ invec[j]=b[i][j];
+ for (j=0;j<outVarDim;j++)
+ outvec[j]=(int)(b[i][inVarDim+j]+0.001);
+ if (ruleSet.checkRuleSetFired(invec)==1)
+ {
+ tmp=ruleSet.output(invec,c,d,e);
+ //get output class
+ for (j=0;j<outVarDim;j++)
+ cla[j]=(int)(tmp[j]*cn[j]);
+ //output data dim equal to outputVariable dim
+ if (cla !=outvec)
+ misclassify++;
+ }
+ else
+ misclassify++;
+ }
+ float result=(1.0*(arrayLen-misclassify))/b.len();
+ return result ;
+}
+
+float
+IndividualInt::fitness_RMT(const FuzzyRule& a, const array& b,const vector<int>& cn,const int&c, const int& d,const int& e,const IndividualInt& f) const
+{
+ int outVarDim=a.get_outputSize();
+ int inVarDim=a.get_variableSize();
+ int tmpint=inVarDim+outVarDim;
+ int arrayLen=b.len();
+ if (tmpint !=b.wid())
+ {
+ cerr<<"input dimension is not exact number of variables in fitness_RMT"<<endl;
+ exit(1);
+ }
+ //get rule set
+ FuzzyRuleSet ruleSet=this->formRuleSet_RMT(a,f);
+ if (ruleSet.get_ruleSetSize()==0)
+ return 0;
+ vector<float> invec(inVarDim);
+ vector<int> outvec(outVarDim);
+ //begin classify
+ int misclassify=0;
+ vector<int> cla(outVarDim);
+ vector<float> tmp(outVarDim);
+ for (int i=0;i<arrayLen;i++)
+ {
+ //get input vector for the ith row
+ for (int j=0;j<inVarDim;j++)
+ invec[j]=b[i][j];
+ for (j=0;j<outVarDim;j++)
+ outvec[j]=(int)(b[i][inVarDim+j]+0.001);
+ if (ruleSet.checkRuleSetFired(invec)==1)
+ {
+ tmp=ruleSet.output(invec,c,d,e);
+ //get output class
+ for (j=0;j<outVarDim;j++)
+ cla[j]=(int)(tmp[j]*cn[j]);
+ //output data dim equal to outputVariable dim
+ if (cla !=outvec)
+ misclassify++;
+ }
+ else
+ misclassify++;
+ }
+ float result=(1.0*(arrayLen-misclassify))/b.len();
+ return result ;
+}
+
+//operators
+
+int&
+IndividualInt::operator [] (int i) const
+{
+ assert(i>=0&&i<length);
+ return ptr[i];
+}
+
+IndividualInt&
+IndividualInt::operator =(const IndividualInt& a)
+{
+ if ((&a)==this) return *this;
+ delete []ptr;
+ length=a.length;
+ m_rate=a.m_rate;
+ ptr=new int[length];
+ assert(ptr !=0);
+ for (int i=0;i<length;i++)
+ ptr[i]=a.ptr[i];
+ return *this;
+}
+
+//I/O operators
+ostream& operator <<(ostream& os, const IndividualInt& a)
+{
+ os<<a.length<<"\t"<<a.m_rate<<endl;
+ os<<"\t";
+ int sum=0;
+ for (int i=0;i<a.length;i++)
+ {
+ os<<a[i]<<"\t";
+ sum++;
+ if ((sum%8)==0)
+ {
+ os<<endl;
+ os<<"\t";
+ sum=0;
+ }
+ }
+ os<<endl;
+ return os;
+}
+
+istream& operator >>(istream& is,IndividualInt& a)
+{
+ is>>a.length>>a.m_rate;
+ if (a.ptr !=0)
+ delete []a.ptr;
+ a.ptr=new int[a.length];
+ assert(a.ptr !=0);
+ for (int i=0;i<a.length;i++)
+ is>>a.ptr[i];
+ return is;
+}
+
+
+
diff --git a/fu_ga_fs/chromint.h b/fu_ga_fs/chromint.h
new file mode 100644
index 0000000..4c3ceee
--- /dev/null
+++ b/fu_ga_fs/chromint.h
@@ -0,0 +1,80 @@
+#ifndef __CHROMINT_H__
+#define __CHROMINT_H__
+
+
+#include "ruleset.h"
+#include "fuzzyrul.h"
+#include "variablf.h"
+#include "memberf.h"
+#include "vector.h"
+#include "array.h"
+
+
+class IndividualInt
+{
+private:
+ int length; //length of the individual
+ int* ptr; //pointer to the Individual
+ float m_rate; //mutation rate
+public:
+ IndividualInt():length(0),ptr(0),m_rate(0) {}
+ IndividualInt(int a,float b);
+ IndividualInt(int a,int* b,float c);
+ IndividualInt(vector<int> a, float b);
+ IndividualInt(const IndividualInt& a);
+
+ ~IndividualInt() {delete []ptr;}
+
+ //member function
+ int get_length() const {return length;}
+ float get_mrate() const {return m_rate;}
+ IndividualInt& change_mrate(const float& a);
+ IndividualInt& change_length(const int& a);
+ IndividualInt& initialize(const int& a,const int& b);
+ IndividualInt& initialize_range(const IndividualInt& a);
+ //for evolving rule set only
+ IndividualInt& initialize_range_RM(const IndividualInt& a);
+ //for evolving rule set and tuning membersgip function
+ IndividualInt& initialize_range_RMT(const IndividualInt& a);
+ //for evolving rule set and tuning membersgip function and type
+ FuzzyRuleSet formRuleSet(const FuzzyRule& a) const;
+ //a:base rule, for rule set only
+ FuzzyRuleSet formRuleSet_RM(const FuzzyRule& a,const IndividualInt& b) const;
+ //a:base rule, b:range, for rule set & membership functions
+ FuzzyRuleSet formRuleSet_RMT(const FuzzyRule& a,const IndividualInt& b) const;
+ //a:base rule, b:range, for rule set & membership functions and type
+ float fitness(const FuzzyRule& a, const array& b,const vector<int>& cn,const int&c,const int& d,const int&e) const;
+ //a: base rule, b: input array c:ruleEffectFlag
+ //d:fuzzyFlag e:defuzzyFlag cn:class no. for output
+ //for evolving rule set only
+ float fitness_RM(const FuzzyRule& a, const array& b,const vector<int>& cn,const int&c,const int& d,const int& e,const IndividualInt& f) const;
+ //a: base rule, b: input array c:ruleEffectFlag
+ //d:fuzzyFlag e:defuzzyFlag cn:class no. for output
+ //f: range individual
+ //for evolving rule set and tuning membership functions
+ float fitness_RMT(const FuzzyRule& a, const array& b,const vector<int>& cn,const int&c,const int& d,const int& e,const IndividualInt& f) const;
+ //a: base rule, b: input array c:ruleEffectFlag
+ //d:fuzzyFlag e:defuzzyFlag cn:class no. for output
+ //f: range individual
+ //for evolving rule set and tuning membership functions and type
+ IndividualInt& mutate_one(const IndividualInt& a);
+ //a: rule range, for evolving rule set only
+ IndividualInt& mutate_one_RM(const IndividualInt& a);
+ //a: rule range, for evolving rule set & tuning membership functions
+ IndividualInt& mutate_one_RMT(const IndividualInt& a);
+ //a: rule range, for evolving rule set & tuning membership functions & type
+
+ friend void crossoverOP(IndividualInt& a,IndividualInt& b);
+ friend void crossoverTP(IndividualInt& a,IndividualInt& b);
+ friend void crossoverUniform(IndividualInt& a,IndividualInt& b);
+
+ //operators
+ int& operator [] (int i) const;
+ IndividualInt& operator =(const IndividualInt& a);
+
+ //I/O operators
+ friend istream& operator >>(istream& is,IndividualInt& a);
+ friend ostream& operator <<(ostream& os,const IndividualInt& a);
+};
+#endif
+
diff --git a/fu_ga_fs/extern.h b/fu_ga_fs/extern.h
new file mode 100644
index 0000000..257b61b
--- /dev/null
+++ b/fu_ga_fs/extern.h
@@ -0,0 +1,10 @@
+#ifndef _EXTERN_G_H
+#define _EXTERN_G_H
+
+#define NAME_MAX 80
+extern char resultFileName[NAME_MAX], dataFileName[NAME_MAX];
+extern int fuzzyFlag; //1: average 0:minimum
+extern int defuzzyFlag; //0: maximum, 1: centroid without overlap, 2: with overlap
+extern int ruleEffectFlag; //1:maximum 0:add output values from each rule
+
+#endif
diff --git a/fu_ga_fs/fl.cpp b/fu_ga_fs/fl.cpp
new file mode 100644
index 0000000..e42d173
--- /dev/null
+++ b/fu_ga_fs/fl.cpp
@@ -0,0 +1,208 @@
+#include <fstream.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "memberf.h"
+#include "variablf.h"
+#include "vector.h"
+#include "fuzzyrul.h"
+#include "ruleset.h"
+
+#include "extern.h"
+
+#define max(a,b) ((a-b)>0)?a:b
+
+#define NAME_MAX 80
+
+char ruleName[NAME_MAX];
+char ruleInName[NAME_MAX];
+
+FuzzyRuleSet ruleSet;
+
+static void read_fl_runfile(char *dataFile); // read fuzzy system run file
+static void read_fl_rulefile(void); // read fuzzy system rule file
+static void write_fl_rules(void); // output fuzzy rules in verbal to a file
+static void view_fl_result(void);
+
+void fl (char *dataFile)
+{
+ read_fl_runfile(dataFile);
+ read_fl_rulefile();
+ write_fl_rules();
+
+ ifstream dFile;
+ dFile.open(dataFileName,ios::in);
+ if (!dFile)
+ {
+ cerr<<"cann't open file "<<dataFileName<<" for input"<<endl;
+ exit(1);
+ }
+
+ int indim,outdim; //input dim and output dim
+ dFile>>indim>>outdim;
+ vector<float> invec(indim);
+ vector<int> outvec(outdim);
+ vector<int> classN(outdim); //store class no. for each output
+ dFile>>classN;
+
+ int outVarDim=ruleSet[0].get_outputSize();
+ if (outdim !=outVarDim)
+ {
+ cout<<"dim of data outputs isn't equal to dim of output variables in rules"<<endl;
+ exit(1);
+ }
+
+ ofstream rFile;
+ rFile.open(resultFileName,ios::out);
+ if (!rFile)
+ {
+ cerr<<"cann't open file " <<resultFileName<< " for output\n"<<endl;
+ exit(1);
+ }
+
+ rFile<<"index\t"<<"Wrong?\t"<<"Target\t"<<"Obtained"<<endl;
+
+ int in_order=0;
+ int misclassify=0;
+ vector<int> cla(outVarDim);
+ vector<float> tmp(outVarDim);
+
+ while (dFile>>invec)
+ {
+ dFile>>outvec;
+ in_order++;
+ rFile<<in_order<<"\t";
+ if (ruleSet.checkRuleSetFired(invec)==1)
+ {
+ tmp=ruleSet.output(invec,ruleEffectFlag,fuzzyFlag,defuzzyFlag);
+
+ //get output class
+ for (int idx=0;idx<outVarDim;idx++)
+ cla[idx]=(int)(tmp[idx]*classN[idx]);
+
+ //output data dim equal to outputVariable dim
+ if (cla !=outvec)
+ {
+ rFile<<"wrong\t";
+ misclassify++;
+ }
+ else
+ rFile<<"\t";
+
+ rFile<<(outvec|cla);
+ }
+ else
+ {
+ rFile<<"rule set not fired"<<endl;
+ misclassify++;
+ }
+ }
+ dFile.close();
+
+ rFile<<"total misclassification is :"<<misclassify<<endl;
+ rFile.close();
+
+ view_fl_result();
+}
+
+static void read_fl_runfile (char *dataFile)
+{
+ int true;
+ char Msg[NAME_MAX];
+ strcpy(Msg,"edit ");
+ strcat(Msg,dataFile);
+
+ ifstream runFile;
+ do
+ {
+ runFile.open(dataFile,ios::in);
+ if (!runFile)
+ {
+ cerr<<"cann't open file "<<dataFile<<" for input"<<endl;
+ exit(1);
+ }
+ runFile>>ruleInName>>dataFileName>>ruleName>>resultFileName;
+ runFile>>fuzzyFlag>>defuzzyFlag>>ruleEffectFlag;
+ runFile.close();
+ cout<<ruleInName<<endl;
+ cout<<dataFileName<<endl;
+ cout<<ruleName<<endl;
+ cout<<resultFileName<<endl;
+ cout<<fuzzyFlag<<" 0:minimum 1:average"<<endl;
+ cout<<defuzzyFlag<<" 0:maximum 1:without overlap 2:with overlap"<<endl;
+ cout<<ruleEffectFlag<<" 1: maximum of output values from each rule 0:add"<<endl;
+ cout<<"(C)ontinue, (Q)uit, (M)odify runfile ";
+ char condition;
+ cin>>condition;
+ switch(condition)
+ {
+ case 'c': true=0;
+ break;
+ case 'C': true=0;
+ break;
+ case 'q': exit(1);
+ case 'Q': exit(1);
+ case 'm': true=1;
+ system(Msg);
+ break;
+ case 'M': true=1;
+ system(Msg);
+ break;
+ default:
+ true=1;
+ break;
+ }
+ } while (true==1);
+}
+
+static void read_fl_rulefile (void)
+{
+ // FuzzyRule
+ ifstream iFile;
+ iFile.open(ruleInName,ios::in);
+ if (!iFile)
+ {
+ cerr<<"cann't open file "<<ruleInName<<" for input"<<endl;
+ exit(1);
+ }
+
+ iFile>>ruleSet;
+ iFile.close();
+}
+
+static void write_fl_rules (void)
+{
+ //output formed rules
+ ofstream oFile;
+ oFile.open(ruleName,ios::out);
+ if (!oFile)
+ {
+ cerr<<"cann't open file "<<ruleName<<" for output"<<endl;
+ exit(1);
+ }
+ for (int i=0;i<ruleSet.get_ruleSetSize();i++)
+ oFile<<i<<"th rule: "<<ruleSet[i].get_ruleContent()<<endl;
+ oFile.close();
+}
+
+static void view_fl_result (void)
+{
+ cout<<"view classification result? Y/N ";
+ char condition;
+ int true;
+ char Msg[NAME_MAX];
+ do
+ {
+ cin >>condition;
+ if ((condition=='Y')||(condition=='y'))
+ {
+ strcpy(Msg,"edit ");
+ strcat(Msg,resultFileName);
+ system(Msg);
+ true=1;
+ cout<<"view classification result again? Y/N ";
+ }
+ else
+ true=0;
+ }while (true==1);
+}
diff --git a/fu_ga_fs/flga.cpp b/fu_ga_fs/flga.cpp
new file mode 100644
index 0000000..eb009c5
--- /dev/null
+++ b/fu_ga_fs/flga.cpp
@@ -0,0 +1,93 @@
+#include "headfile.h"
+
+#define NAME_MAX 80
+char resultFileName[NAME_MAX], dataFileName[NAME_MAX];
+int fuzzyFlag; //1: average 0:minimum
+int defuzzyFlag; //0: maximum, 1: centroid without overlap, 2: with overlap
+int ruleEffectFlag; //1:maximum 0:add output values from each rule
+
+void
+main(int argc,char *argv[])
+{
+ extern void ga(char *);
+ extern void fl(char *);
+
+ if (argc!=2)
+ {
+ printf("usage: flga runfile\n");
+ exit(1);
+ }
+ char gaName[80],flName[80];
+ char Msg[80];
+ strcpy(Msg,"edit ");
+ strcat(Msg,argv[1]);
+ char condition;
+ int true=1;
+ do
+ {
+ clrscr();
+ ifstream runFile;
+ runFile.open(argv[1],ios::in);
+ if (!runFile)
+ {
+ cerr<<"cann't open file "<<argv[1]<<" for input"<<endl;
+ exit(1);
+ }
+ runFile>>gaName>>flName;
+ cout<<gaName<<" run file for generating rules"<<endl;
+ cout<<flName<<" run file for classification using fuzzy rule system"<<endl;
+ runFile.close();
+ cout<<"C: continue"<<endl;
+ cout<<"M: modify"<<endl;
+ cout<<"Q: quit"<<endl;
+ cout<<"your choice? ";
+ cin>>condition;
+ switch(condition)
+ {
+ case 'c':
+ case 'C':
+ true=0;
+ break;
+ case 'm':
+ case 'M':
+ system(Msg);
+ break;
+ case 'q':
+ case 'Q':
+ exit(1);
+ default:
+ break;
+ }
+ }while (true==1);
+
+ true=1;
+ do
+ {
+ clrscr();
+ cout<<"G: generating rules"<<endl;
+ cout<<"C: classification"<<endl;
+ cout<<"Q: quit"<<endl;
+ cout<<"your choice? ";
+ cin>>condition;
+ switch(condition)
+ {
+ case 'g':
+ case 'G':
+ ga(gaName);
+ break;
+ case 'c':
+ case 'C':
+ fl(flName);
+ break;
+ case 'q':
+ case 'Q':
+ true=0;
+ break;
+ default:
+ true=0;
+ }
+ } while (true==1);
+}
+
+
+
diff --git a/fu_ga_fs/fuzzyrul.cpp b/fu_ga_fs/fuzzyrul.cpp
new file mode 100644
index 0000000..da54b29
--- /dev/null
+++ b/fu_ga_fs/fuzzyrul.cpp
@@ -0,0 +1,430 @@
+#include "headfile.h"
+#include "fuzzyrul.h"
+
+//constructors
+FuzzyRule::FuzzyRule(int a,int b,vector<int> c,vector<int> d):
+variableSize(a),outputSize(b)
+{
+ if ((c.len()!=variableSize)||(d.len()!=outputSize))
+ {
+ fprintf(stderr,"input(or output)Vector dimension doesn't equal rule's input(output) dimension");
+ exit(1);
+ }
+ inputSetFlag=c;
+ outputSetFlag=d;
+ inputVariable= new FuzzyVariable[variableSize];
+ assert(inputVariable !=0);
+ outputVariable= new FuzzyVariable[outputSize];
+ assert(outputVariable !=0);
+ ruleContent=new char[256];
+ assert(ruleContent !=0);
+ strcpy(ruleContent,"rule_not_formed_yet");
+}
+
+FuzzyRule::FuzzyRule(int a,int b,vector<int> c,vector<int> d,char* str):
+variableSize(a),outputSize(b)
+{
+ if ((c.len()!=variableSize)||(d.len()!=outputSize))
+ {
+ fprintf(stderr,"input(or output)Vector dimension doesn't equal rule's input(output) dimension");
+ exit(1);
+ }
+ inputSetFlag=c;
+ outputSetFlag=d;
+ inputVariable= new FuzzyVariable[variableSize];
+ assert(inputVariable !=0);
+ outputVariable= new FuzzyVariable[outputSize];
+ assert(outputVariable !=0);
+ int length=strlen(str)+2;
+ ruleContent=new char[length];
+ assert(ruleContent !=0);
+ strncpy(ruleContent,str,length);
+}
+
+FuzzyRule::FuzzyRule(const FuzzyRule& a):
+ variableSize(a.variableSize),outputSize(a.outputSize)
+{
+ inputSetFlag=a.inputSetFlag;
+ outputSetFlag=a.outputSetFlag;
+ inputVariable= new FuzzyVariable[variableSize];
+ assert(inputVariable !=0);
+ for (int i=0;i<variableSize;i++)
+ inputVariable[i]=a.inputVariable[i];
+ outputVariable= new FuzzyVariable[outputSize];
+ assert(outputVariable !=0);
+ for (i=0;i<outputSize;i++)
+ outputVariable[i]=a.outputVariable[i];
+ delete []ruleContent;
+ int length=strlen(a.ruleContent)+2;
+ ruleContent= new char[length];
+ assert(ruleContent !=0);
+ strncpy(ruleContent,a.ruleContent,length);
+}
+
+//member functions
+FuzzyRule&
+FuzzyRule::change_inputSetFlag(const vector<int>& a)
+{
+ //check whether have the correct dimension
+ if (variableSize !=a.len())
+ {
+ fprintf(stderr,"assign inputSetFlag with a different dimension");
+ exit(1);
+ }
+ inputSetFlag=a;
+ return *this;
+}
+
+FuzzyRule&
+FuzzyRule::change_outputSetFlag(const vector<int>& a)
+{
+ //check whether have the correct dimension
+ if (outputSize !=a.len())
+ {
+ fprintf(stderr,"assign outputSetFlag with a different dimension");
+ exit(1);
+ }
+ outputSetFlag=a;
+ return *this;
+}
+
+FuzzyRule&
+FuzzyRule::change_variableSize(const int& a)
+{
+ variableSize=a;
+ return *this;
+}
+
+FuzzyRule&
+FuzzyRule::change_outputSize(const int& a)
+{
+ outputSize=a;
+ return *this;
+}
+
+FuzzyRule&
+FuzzyRule::change_ruleContent(char *str)
+{
+ delete []ruleContent;
+ int length=strlen(str)+2;
+ ruleContent= new char[length];
+ assert(ruleContent !=0);
+ strncpy(ruleContent,str,length);
+ return *this;
+}
+
+FuzzyRule&
+FuzzyRule::form_ruleContent()
+{
+ delete []ruleContent;
+ char str[256];
+ strcpy(str,"if_");
+ int inflag=0,outflag=0;
+ int intmp=0,outtmp=0;
+ for (int i=0;i<variableSize;i++)
+ {
+ if (inputSetFlag[i]==0)
+ inflag++;
+ else
+ {
+ if (intmp>0)
+ strcat(str,"_and_");
+ intmp++;
+ strcat(str,this->inputVariable[i].get_variableName());
+ strcat(str,"_is_");
+ if (inputSetFlag[i]<0)
+ strcat(str,"Not");
+ strcat(str,this->inputVariable[i].setMeaning(this->inputVariable[i].get_setSize(),abs(inputSetFlag[i])-1));
+ }
+ }
+ strcat(str,"_then_");
+ for (i=0;i<outputSize;i++)
+ {
+ if (outputSetFlag[i]==0)
+ outflag++;
+ else
+ {
+ if (outtmp>0)
+ strcat(str,"_and_");
+ outtmp++;
+ strcat(str,this->outputVariable[i].get_variableName());
+ strcat(str,"_is_");
+ if (outputSetFlag[i]<0)
+ strcat(str,"Not");
+ strcat(str,this->outputVariable[i].setMeaning(this->outputVariable[i].get_setSize(),abs(outputSetFlag[i])-1));
+ }
+ }
+ if ((inflag==variableSize)||(outflag==outputSize))
+ {
+ ruleContent=new char[60];
+ assert(ruleContent !=0);
+ strcpy(ruleContent,"This_rule_doesn't_exist");
+ }
+ else
+ {
+ int length=strlen(str)+2;
+ ruleContent=new char[length];
+ assert(ruleContent !=0);
+ strncpy(ruleContent,str,length);
+ }
+ return *this;
+}
+
+FuzzyRule&
+FuzzyRule::change_outputVariable(const FuzzyVariable& a,const int& b)
+{
+ assert(b>=0&&b<outputSize);
+ outputVariable[b]=a;
+ return *this;
+}
+
+FuzzyVariable& //need & ?
+FuzzyRule::get_outputVariable(const int&a) const
+{
+ assert(a>=0&&a<outputSize);
+ return outputVariable[a];
+}
+
+int
+FuzzyRule::checkRuleActive(const vector<float>& a) const
+{//check whether this has been activated
+ assert(a.len()==variableSize);
+ vector<int>* vec;
+ vec= new vector<int>[variableSize];
+ int sum=0;
+ for (int i=0;i<variableSize;i++)
+ {
+ if (inputSetFlag[i]==0)
+ sum++;
+ else
+ {
+ vec[i]=inputVariable[i].setFireFlag(a[i]);
+ int ind=abs(inputSetFlag[i])-1;
+ if (vec[i][ind]==1)
+ sum++;
+ }
+ }
+ delete []vec;
+ if (sum==variableSize)
+ return 1;
+ else
+ return 0;
+}
+
+vector<float>
+FuzzyRule::FuzzyOutput(const vector<float>& a) const
+{
+ //check the input dimension
+ assert(a.len()==variableSize);
+ //check whethe the rule is activated
+ if (checkRuleActive(a) !=1)
+ {
+ fprintf(stderr,"try to use unactivated rule\n");
+ exit(1);
+ }
+ float min=1.0,tmp;
+ for (int i=0;i<variableSize;i++)
+ {
+ if (inputSetFlag[i]!=0)
+ {
+ tmp=inputVariable[i].output(a[i],inputSetFlag[i]);
+ if (min>tmp)
+ min=tmp; //get the minimum value
+ }
+ }
+ vector<float> tmpout(outputSize);
+ int outflag=0;
+ for (i=0;i<outputSize;i++)
+ {
+ if (outputSetFlag[i] ==0)
+ {
+ outflag++;
+ tmpout[i]=0.0;
+ }
+ else
+ {
+ if (outputSetFlag[i]>0)
+ tmpout[i]=min;
+ else
+ {
+ if (min>=0.9999)
+ tmpout[i]=0.0001;
+ else
+ tmpout[i]=1-min;
+ }
+ }
+ }
+ if (outflag==outputSize)
+ {
+ cout<<"output flag can't be zero for all output variables"<<endl;
+ cout<<"before using rule, check whether it is feasible or not"<<endl;
+ cout<<"fuzzyrul.cpp, line 335"<<endl;
+ exit(1);
+ }
+ return tmpout;
+}
+
+vector<float>
+FuzzyRule::FuzzyOutput_average(const vector<float>& a) const
+{
+ //check the input dimension
+ assert(a.len()==variableSize);
+ //check whethe the rule is activated
+ if (checkRuleActive(a) !=1)
+ {
+ fprintf(stderr,"try to use unactivated rule\n");
+ exit(1);
+ }
+ float ave=0.0;
+ int tmp=0;
+ for (int i=0;i<variableSize;i++)
+ {
+ if (inputSetFlag[i]!=0)
+ {
+ ave +=inputVariable[i].output(a[i],inputSetFlag[i]);
+ tmp++;
+ }
+ }
+ ave =ave/tmp;
+ int outflag=0;
+ vector<float> tmpout(outputSize);
+ for (i=0;i<outputSize;i++)
+ {
+ if (outputSetFlag[i] ==0)
+ {
+ outflag++;
+ tmpout[i]=0.0;
+ }
+ else
+ {
+ if (outputSetFlag[i]>0)
+ tmpout[i]=ave;
+ else
+ {
+ if (ave>=0.9999)
+ tmpout[i]=0.0001;
+ else
+ tmpout[i]=1-ave;
+ }
+ }
+ }
+ if (outflag==outputSize)
+ {
+ cout<<"output flag can't be zero for all output variables"<<endl;
+ cout<<"before using rule, check whether it is feasible or not"<<endl;
+ cout<<"fuzzyrul.cpp, line 335"<<endl;
+ exit(1);
+ }
+ return tmpout;
+}
+
+vector<int>
+FuzzyRule::formRange(const int& a) const
+{ //for evolving rule set only
+ int tmp=variableSize+outputSize+1;
+ vector<int> range(tmp);
+ range[0]=a; //how many rules
+ for (int i=0;i<variableSize;i++)
+ range[i+1]=inputVariable[i].get_setSize();
+ for (i=0;i<outputSize;i++)
+ range[variableSize+i+1]=outputVariable[i].get_setSize();
+ return range;
+}
+
+vector<int>
+FuzzyRule::formRange_RM(const int& a,const int& b) const
+{ //for evolving rule set and tuning membership functions
+ int tmp=variableSize+outputSize+2;
+ vector<int> range(tmp);
+ range[0]=a; //how many rules
+ range[1]=b; //how many divisions for each variable
+ for (int i=0;i<variableSize;i++)
+ range[i+2]=inputVariable[i].get_setSize();
+ for (i=0;i<outputSize;i++)
+ range[variableSize+i+2]=outputVariable[i].get_setSize();
+ return range;
+}
+
+vector<int>
+FuzzyRule::formRange_RMT(const int& a,const int& b,const int& c) const
+{ //for evolving rule set and tuning membership functions
+ int tmp=variableSize+outputSize+3;
+ vector<int> range(tmp);
+ range[0]=a; //how many rules
+ range[1]=b; //how many divisions for each variable
+ range[2]=c; //number of membership function types
+ for (int i=0;i<variableSize;i++)
+ range[i+3]=inputVariable[i].get_setSize();
+ for (i=0;i<outputSize;i++)
+ range[variableSize+i+3]=outputVariable[i].get_setSize();
+ return range;
+}
+
+//operators
+FuzzyVariable&
+FuzzyRule::operator [] (int i) const
+{
+ assert(i>=0&&i<variableSize);
+ return inputVariable[i];
+}
+
+FuzzyRule&
+FuzzyRule::operator =(const FuzzyRule& a)
+{
+ if ((&a)==this) return *this;
+ delete []inputVariable;
+ delete []outputVariable;
+ delete []ruleContent;
+ variableSize=a.variableSize;
+ outputSize=a.outputSize;
+ inputSetFlag=a.inputSetFlag;
+ outputSetFlag=a.outputSetFlag;
+ inputVariable=new FuzzyVariable[variableSize];
+ assert(inputVariable !=0);
+ for (int i=0;i<variableSize;i++)
+ inputVariable[i]=a.inputVariable[i];
+ outputVariable=new FuzzyVariable[outputSize];
+ assert(outputVariable !=0);
+ for (i=0;i<outputSize;i++)
+ outputVariable[i]=a.outputVariable[i];
+ int length=strlen(a.ruleContent)+2;
+ ruleContent=new char[length];
+ assert(ruleContent !=0);
+ strncpy(ruleContent,a.ruleContent,length);
+ return *this;
+}
+
+//I/O operators
+ostream& operator <<(ostream& os,const FuzzyRule& a)
+{
+ os<<a.variableSize<<"\t";
+ os<<a.outputSize<<endl;
+ for (int i=0;i<a.variableSize;i++)
+ os<<a[i];
+ for (i=0;i<a.outputSize;i++)
+ os<<a.get_outputVariable(i);
+ vector<int> intvec=a.inputSetFlag|a.outputSetFlag;
+ os<<intvec;
+ return os;
+}
+
+istream& operator >>(istream& is, FuzzyRule& a)
+{
+ int tmpVariableSize, tmpOutputSize;
+ is>>tmpVariableSize>>tmpOutputSize;
+ vector<int> tmpInputSetFlag(tmpVariableSize);
+ vector<int> tmpOutputSetFlag(tmpOutputSize);
+ FuzzyRule tmpRule(tmpVariableSize,tmpOutputSize,tmpInputSetFlag,tmpOutputSetFlag);
+ for (int i=0;i<tmpVariableSize;i++)
+ is>>tmpRule.inputVariable[i];
+ for (i=0;i<tmpOutputSize;i++)
+ is>>tmpRule.outputVariable[i];
+ is>>tmpInputSetFlag>>tmpOutputSetFlag;
+ tmpRule.change_inputSetFlag(tmpInputSetFlag);
+ tmpRule.change_outputSetFlag(tmpOutputSetFlag);
+ tmpRule.form_ruleContent();
+ a=tmpRule;
+ return is;
+}
+
+
diff --git a/fu_ga_fs/fuzzyrul.h b/fu_ga_fs/fuzzyrul.h
new file mode 100644
index 0000000..7e957a0
--- /dev/null
+++ b/fu_ga_fs/fuzzyrul.h
@@ -0,0 +1,73 @@
+#ifndef __FUZZYRUL_H__
+#define __FUZZYRUL_H__
+
+#include "variablf.h"
+#include "memberf.h"
+#include "vector.h"
+
+class FuzzyRule
+{
+private:
+ int variableSize; //number of variables in a rule
+ int outputSize; //number of output in a rule
+ vector<int> inputSetFlag; //vector tell which set is activated for each variable
+ vector<int> outputSetFlag; //vector tell which set is activated for each variable;
+ FuzzyVariable* inputVariable; //pointers to the input variables
+ FuzzyVariable* outputVariable; //pointers to the output variables
+ char* ruleContent;
+
+public:
+ FuzzyRule():variableSize(0),outputSize(0),ruleContent(0),inputVariable(0),outputVariable(0)
+ {
+ vector<int> vec;
+ inputSetFlag=vec;
+ outputSetFlag=vec;
+ }
+ FuzzyRule(int a,int b,vector<int> c,vector<int> d);
+ FuzzyRule(int a,int b,vector<int> c,vector<int> d,char* str);
+ FuzzyRule(const FuzzyRule& a);
+ ~FuzzyRule(){delete []ruleContent; delete []inputVariable;delete []outputVariable;}
+
+ //member functions
+ int get_variableSize() const {return variableSize;}
+ int get_outputSize() const {return outputSize;}
+ vector<int> get_inputSetFlag()const {return inputSetFlag;}
+ vector<int> get_outputSetFlag() const {return outputSetFlag;}
+ char* get_ruleContent() const {return ruleContent;}
+ FuzzyRule& change_inputSetFlag(const vector<int>& a);
+ FuzzyRule& change_outputSetFlag(const vector<int>& a);
+ FuzzyRule& change_variableSize(const int& a);
+ FuzzyRule& change_outputSize(const int& a);
+ FuzzyRule& change_ruleContent(char* str);
+ FuzzyRule& form_ruleContent();
+ FuzzyRule& change_outputVariable(const FuzzyVariable& a,const int& b);
+ //bth outputVariable change to a
+ int checkRuleActive(const vector<float>& a) const;
+ //check this rule is activated via input a or not
+ vector<float> FuzzyOutput(const vector<float>& a) const;
+ //calculate the fuzzy output vector
+ vector<float> FuzzyOutput_average(const vector<float>& a) const;
+ FuzzyVariable& get_outputVariable(const int& a) const;
+ vector<int> formRange(const int& a) const;
+ //a: maximum rules; get possible maximum fuzzy set no. for each variable
+ //for evolve rule set only
+ vector<int> formRange_RM(const int& a,const int& b) const;
+ //a: maximum rules; b: number of division for each variable
+ //get possible maximum fuzzy set no. for each variable
+ //for evolve rule set and base length of membership function
+ vector<int> formRange_RMT(const int& a,const int& b,const int& c) const;
+ //a: maximum rules; b: number of division for each variable
+ //get possible maximum fuzzy set no. for each variable
+ //c: number of function types
+ //for evolve rule set and base length of membership function and type
+
+ //operator
+ FuzzyVariable& operator [] (int i) const;
+ FuzzyRule& operator =(const FuzzyRule& a);
+
+ //I/O operators
+ friend istream& operator >>(istream& is, FuzzyRule& a);
+ friend ostream& operator <<(ostream& os,const FuzzyRule& a);
+};
+#endif
+
diff --git a/fu_ga_fs/ga.cpp b/fu_ga_fs/ga.cpp
new file mode 100644
index 0000000..48643a8
--- /dev/null
+++ b/fu_ga_fs/ga.cpp
@@ -0,0 +1,405 @@
+#include "headfile.h"
+#include "popuint.h"
+#include "chromint.h"
+#include "misc.h"
+#include "extern.h"
+
+#define NAME_MAX 80
+
+char baseRuleName[NAME_MAX];
+int shift; //1:shift 0: no shift for selection
+int flag_c; //0:uniform 1:one point 2:two point crossover
+float c_rate; //crossover rate
+float m_rate; //mutation rate
+int evolve_type; //0:rule set 1:rule set & membership function tuning
+ //2: rule set & membership function tuning and type selection
+int generation; //generation number
+int p_size; //population length
+int ruleNo; //maximum rule number
+int divisionNo; //the number of division for each variable
+int patNo; //training pattern number
+float criterion; //fitness criterion
+
+FuzzyRule baseRule;
+vector<int> rangeint;
+PopulationInt popu;
+vector<float> fitvec;
+int inLen,outLen; //pattern input, output length
+array arrayPat;
+vector<int> classN;
+
+char mutateName[NAME_MAX]; //adapt GA
+int m_flag; //using muate rule, 1:yes, 0:no
+FuzzyRuleSet adaptRuleSet;
+static void read_adapt_rule(void); // read mutate fuzzy rule
+
+static void read_ga_runfile(char *dataFile); // read GA run file
+static void read_fuzzy_base_rule(void);
+static void read_ga_training_patterns(void);
+static void form_range_vector(void);
+static int get_population_length(void);
+static void update_popu(int size, int wide, float c_rate, float m_rate);
+static void popu_initialize(IndividualInt& range);
+static void calculate_fitness(IndividualInt& range);
+static void write_ga_fuzzy_rules(int idx, IndividualInt& range, int best_fit);
+static void popu_mutate(IndividualInt& range, int best_fit);
+static void ga_post_processing(void);
+
+// Listing 9.1 The ga() routine in the fuzzy evolutionary fuzzy rule system implementation
+void ga(char *dataFile)
+{
+ time_t t;
+ srand((unsigned) time(&t));
+
+ read_ga_runfile(dataFile);
+ read_adapt_rule(); // read in fuzzy adapt rules
+
+ //define ga_adapt rule input and output vector
+ vector<float> vecin_m(adaptRuleSet[0].get_variableSize());
+ vector<float> vecout_m(adaptRuleSet[0].get_outputSize());
+
+ read_fuzzy_base_rule();
+
+ //input training data pairs
+ read_ga_training_patterns();
+
+ //form rule range
+ form_range_vector();
+ IndividualInt range(rangeint,0);
+
+ //get population length
+ int tmplen = get_population_length();
+
+ //declarate population
+ update_popu(p_size,tmplen,c_rate,m_rate);
+
+ //population initialize
+ popu_initialize(range); // 11/22/2001
+
+ //vector<float> fitvec(p_size); //store fitness
+ fitvec.changeSize(p_size);
+
+ float prebest=0.1; //store the prrevious best fitness
+
+ //begin generation
+ float nu=0.0; //same fitness repeated number
+ float mrate=m_rate;
+ float crate=c_rate;
+ cout.precision(4);
+
+ cout<<"\t Fitness vs. Generation"<<endl;
+ cout<<"generation\t best\t average\t variance\t "<<endl; //<<m_rate"<<endl;
+ int bestfit;
+ for (int i=0;i<generation;i++)
+ {
+ //calculate the fitness
+ calculate_fitness(range);
+ bestfit=fitvec.maximum_index();
+ float aver=average(fitvec); //average of fitnesses
+ float var=variance(fitvec,aver); //variance of fitnesses
+
+ if ((i%5)==0)
+ cout<<i<<"\t\t"<<fitvec[bestfit]<<"\t"<<aver<<"\t\t"<<var<<endl;
+
+ if (fitvec[bestfit]>criterion)
+ break;
+
+ if (i != (generation -1))
+ { //not the last generation
+ //selection: form new population
+ popu.selection(fitvec,bestfit,shift); //1:sfite
+
+ //change mutate/crossover rate
+ if (m_flag==1)
+ {
+ if (fitvec[bestfit]==prebest)
+ nu +=1.0;
+ else
+ nu=0.0;
+
+ //get mutate/crossover rate using rule
+ vecin_m[0]=fitvec[bestfit];
+ vecin_m[1]=nu;
+ vecin_m[2]=var;
+ vecout_m = adaptRuleSet.output(vecin_m,0,1,1);
+ mrate=vecout_m[0];
+ crate=vecout_m[1];
+ prebest=fitvec[bestfit];
+ popu.change_mrate(mrate);
+ popu.change_crate(crate);
+ }
+ popu.crossover(flag_c,bestfit);
+ popu_mutate(range,bestfit);
+ }
+ }
+ write_ga_fuzzy_rules(i, range, bestfit);
+ ga_post_processing();
+}
+
+static void read_ga_runfile (char *dataFile)
+{
+ int true;
+ char Msg[NAME_MAX];
+ strcpy(Msg,"edit ");
+ strcat(Msg,dataFile);
+ ifstream runFile;
+ char condition;
+ do {
+ clrscr();
+ runFile.open(dataFile,ios::in);
+
+ if (!runFile)
+ {
+ cerr<<"cann't open file "<<dataFile<<" for input"<<endl;
+ exit(1);
+ }
+ runFile>>dataFileName>>baseRuleName>>resultFileName;
+ runFile>>mutateName; //new added
+ runFile>>shift>>flag_c>>c_rate>>m_rate>>m_flag;
+ runFile>>evolve_type>>generation;
+ runFile>>p_size>>ruleNo>>divisionNo>>patNo>>criterion;
+ runFile>>fuzzyFlag>>defuzzyFlag>>ruleEffectFlag;
+ runFile.close();
+
+ cout<<dataFileName<<endl;
+ cout<<baseRuleName<<endl;
+ cout<<resultFileName<<endl;
+ cout<<mutateName<<endl;
+ cout<<shift<<" 0:no shift 1:shift during selection"<<endl;
+ cout<<flag_c<<" 0:uniform 1:one point 2:two point crossover"<<endl;
+ cout<<c_rate<<" crossover probability"<<endl;
+ cout<<m_rate<<" mutation probability"<<endl;
+ cout<<m_flag<<" ga_adapt rule flag 1:yes 0:no"<<endl;
+ cout<<evolve_type<<" 0:rule set 1:rule & function 2:rule & function & type"<<endl;
+ cout<<generation<<" generation number"<<endl;
+ cout<<p_size<<" population size"<<endl;
+ cout<<ruleNo<<" maximum rule number"<<endl;
+ cout<<divisionNo<<" Number of division for each variable"<<endl;
+ cout<<patNo<<" training pattern number"<<endl;
+ cout<<criterion<<" fitness criterion "<<endl;
+
+ cout<<fuzzyFlag<<" 0:minimum 1:average"<<endl;
+ cout<<defuzzyFlag<<" 0:maximum 1:without overlap 2:with overlap"<<endl;
+ cout<<ruleEffectFlag<<" 1: maximum of output values from each rule 0:add"<<endl;
+ cout<<"(C)ontinue, (Q)uit, (M)odify runfile ";
+
+ cin>>condition;
+ switch(condition)
+ {
+ case 'c':
+ case 'C':
+ true=0;
+ break;
+ case 'q':
+ case 'Q': exit(1);
+ case 'm':
+ case 'M': true=1;
+ system(Msg);
+ break;
+ default:
+ true=1;
+ }
+ } while (true==1);
+}
+
+static void read_adapt_rule (void)
+{ // read in fuzzy mutate rules
+ //mutate rule
+ ifstream mFile;
+ mFile.open(mutateName,ios::in);
+ if (!mFile)
+ {
+ cerr<<"cann't open file "<<mutateName<<" for input"<<endl;
+ exit(1);
+ }
+ mFile>>adaptRuleSet;
+ mFile.close();
+}
+
+static void read_fuzzy_base_rule (void)
+{
+ //input base rule
+ ifstream rFile;
+ rFile.open(baseRuleName,ios::in);
+ if (!rFile)
+ {
+ cerr<<"cann't open file base.rul for input"<<endl;
+ exit(1);
+ }
+ rFile>>baseRule;
+ rFile.close();
+}
+
+static void read_ga_training_patterns (void)
+{
+ ifstream dFile;
+ dFile.open(dataFileName,ios::in);
+ if (!dFile)
+ {
+ cerr<<"cann't open file iris.dat for input"<<endl;
+ exit(1);
+ }
+ dFile>>inLen>>outLen;
+ classN.changeSize(outLen);
+ dFile>>classN;
+ arrayPat.changeSize(patNo,inLen+outLen);
+ dFile>>arrayPat;
+ dFile.close();
+}
+
+static void form_range_vector (void)
+{
+ switch(evolve_type)
+ {
+ case 0:
+ rangeint=baseRule.formRange(ruleNo);
+ break;
+ case 1:
+ rangeint=baseRule.formRange_RM(ruleNo,divisionNo);
+ break;
+ default:
+ rangeint=baseRule.formRange_RMT(ruleNo,divisionNo,6); //6: no of types
+ }
+}
+
+static int get_population_length (void)
+{
+ int tmplen=ruleNo*(inLen+outLen) +1; //evolve_type=0
+
+ if (evolve_type==1)
+ {
+ for (int i=2;i<rangeint.len();i++)
+ tmplen +=2*rangeint[i]; //each function has two points
+ }
+ else if (evolve_type==2)
+ {
+ for (int i=3;i<rangeint.len();i++)
+ tmplen +=3*rangeint[i]; //each function has two points and one type
+ }
+ return tmplen;
+}
+
+static void popu_initialize (IndividualInt& range)
+{
+ switch(evolve_type)
+ {
+ case 0:
+ popu.initialize_range(range);
+ break;
+ case 1:
+ popu.initialize_range_RM(range);
+ break;
+ default:
+ popu.initialize_range_RMT(range);
+ }
+}
+
+static void update_popu (int size, int wide, float c_rate, float m_rate)
+{
+ popu.change_length(size);
+ popu.change_width(wide);
+ popu.change_crate(c_rate);
+ popu.change_mrate(m_rate);
+}
+
+static void calculate_fitness (IndividualInt& range)
+{
+ switch(evolve_type)
+ {
+ case 0:
+ fitvec=popu.fitness(baseRule,arrayPat,classN,ruleEffectFlag,fuzzyFlag,defuzzyFlag); //check selection choice
+ break;
+ case 1:
+ fitvec=popu.fitness_RM(baseRule,arrayPat,classN,ruleEffectFlag,fuzzyFlag,defuzzyFlag,range); //check selection choice
+ break;
+ default:
+ fitvec=popu.fitness_RMT(baseRule,arrayPat,classN,ruleEffectFlag,fuzzyFlag,defuzzyFlag,range); //check selection choice
+ }
+}
+
+static void popu_mutate (IndividualInt& range, int bestfit)
+{
+ switch(evolve_type)
+ {
+ case 0:
+ popu.mutate_one(range,bestfit);
+ break;
+ case 1:
+ popu.mutate_one_RM(range,bestfit);
+ break;
+ default:
+ popu.mutate_one_RMT(range,bestfit);
+ }
+}
+
+static void write_ga_fuzzy_rules (int idx, IndividualInt& range, int bestfit)
+{
+ //open file for save result
+ ofstream reFile;
+ reFile.open(resultFileName,ios::out);
+ if (!reFile)
+ {
+ cerr<<"cann't open file result.rul for output"<<endl;
+ exit(1);
+ }
+ switch(evolve_type)
+ {
+ case 0:
+ reFile<<popu[bestfit].formRuleSet(baseRule);
+ break;
+ case 1:
+ reFile<<popu[bestfit].formRuleSet_RM(baseRule,range);
+ break;
+ default:
+ reFile<<popu[bestfit].formRuleSet_RMT(baseRule,range);
+ }
+
+ reFile<<"generation = "<<idx<<endl;
+ reFile<<"fitness="<<fitvec[bestfit]<<endl<<endl;
+ reFile<<dataFileName<<endl;
+ reFile<<baseRuleName<<endl;
+ reFile<<resultFileName<<endl;
+ reFile<<mutateName<<endl;
+ reFile<<shift<<" 0:no shift 1:shit during selection"<<endl;
+ reFile<<flag_c<<" 0:uniform 1:one point 2:two point crossover"<<endl;
+ reFile<<c_rate<<" crossover probability"<<endl;
+ reFile<<m_rate<<" mutation probability"<<endl;
+ reFile<<m_flag<<" ga_adapt rule, 1:yes, 0:no"<<endl;
+ reFile<<evolve_type<<" 0:r 1:rm 2:rmt"<<endl;
+ reFile<<generation<<" generation number"<<endl;
+ reFile<<p_size<<" population size"<<endl;
+ reFile<<ruleNo<<" maximum rule number"<<endl;
+ reFile<<divisionNo<<" Number of division for each variable"<<endl;
+ reFile<<patNo<<" training pattern number"<<endl;
+ reFile<<criterion<<" fitness criterion "<<endl;
+ reFile<<fuzzyFlag<<" 0:minimum 1:average"<<endl;
+ reFile<<defuzzyFlag<<" 0:maximum 1:without overlap 2:with overlap"<<endl;
+ reFile<<ruleEffectFlag<<" 1: maximum of output values from each rule 0:add"<<endl;
+
+ reFile.close();
+}
+
+static void ga_post_processing (void)
+{
+ char Msg[NAME_MAX];
+ clrscr();
+ strcpy(Msg,"move ");
+ strcat(Msg,resultFileName);
+ cout<<"rename "<<resultFileName<<" Y/N? ";
+ char condition;
+ cin>>condition;
+ switch(condition)
+ {
+ case 'Y':
+ case 'y':
+ cout<<"input destination file name: ";
+ char Mmsg[80];
+ cin>>Mmsg;
+ strcat(Msg," ");
+ strcat(Msg,Mmsg);
+ system(Msg);
+ break;
+ default:
+ cout<<"don't change file name"<<endl;
+ }
+}
diff --git a/fu_ga_fs/headfile.h b/fu_ga_fs/headfile.h
new file mode 100644
index 0000000..a2d5774
--- /dev/null
+++ b/fu_ga_fs/headfile.h
@@ -0,0 +1,9 @@
+#include <iostream.h>
+#include <fstream.h>
+#include <assert.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <time.h>
+#include <graphics.h>
diff --git a/fu_ga_fs/memberf.cpp b/fu_ga_fs/memberf.cpp
new file mode 100644
index 0000000..174ef6a
--- /dev/null
+++ b/fu_ga_fs/memberf.cpp
@@ -0,0 +1,506 @@
+#include "headfile.h"
+
+#include "memberf.h"
+
+float LeftTriangle(float,float,float);
+float RightTriangle(float,float,float);
+float Triangle(float,float,float);
+float Sigmoid(float,float,float);
+float reverseSigmoid(float,float,float);
+float Gaussian(float,float,float);
+
+vector<float> antiLeftTriangle(float,float,float);
+vector<float> antiRightTriangle(float,float,float);
+vector<float> antiTriangle(float,float,float);
+vector<float> antiSigmoid(float,float,float);
+vector<float> antiReverseSigmoid(float,float,float);
+vector<float> antiGaussian(float,float,float);
+
+//operators
+FuzzyMember&
+FuzzyMember::operator =(const FuzzyMember& a)
+{
+ if ((&a)==this) return *this;
+ delete []functionType;
+ startPoint=a.startPoint;
+ endPoint=a.endPoint;
+ int length=strlen(a.functionType)+2;
+ functionType= new char[length];
+ assert(functionType !=0);
+ strncpy(functionType,a.functionType,length);
+ return *this;
+}
+
+int
+FuzzyMember::operator ==(const FuzzyMember& a) const
+{
+ int tmp=1;
+ if ((&a)==this) return 1;
+ MyString str1(functionType);
+ MyString str2(a.functionType);
+ if (str1==str2)
+ {
+ if (startPoint !=a.startPoint)
+ tmp=0;
+ if (endPoint !=a.endPoint)
+ tmp=0;
+ }
+ else
+ return 0;
+ return tmp;
+}
+
+int
+FuzzyMember::operator <(const FuzzyMember& a) const
+{
+ int sum=0;
+ if (startPoint<a.startPoint) sum++;
+ if (endPoint<a.endPoint) sum++;
+ if ((startPoint<=a.startPoint)&&(endPoint<=a.endPoint)&&(sum>=1)) return 1;
+ else return 0;
+}
+
+int
+FuzzyMember::operator >(const FuzzyMember& a) const
+{
+ int sum=0;
+ if (endPoint>a.endPoint) sum++;
+ if (startPoint>a.startPoint) sum++;
+ if ((endPoint>=a.endPoint)&&(startPoint>=a.startPoint)&&sum>=1) return 1;
+ else return 0;
+}
+
+
+//member functions
+FuzzyMember&
+FuzzyMember::change_startPoint(const float& a)
+{
+ startPoint=a;
+ return *this;
+}
+
+FuzzyMember&
+FuzzyMember::change_endPoint(const float& a)
+{
+ endPoint=a;
+ return *this;
+}
+
+FuzzyMember&
+FuzzyMember::change_member(const float& a,const float& b, char *str)
+{
+ startPoint=a;
+ endPoint=b;
+ delete []functionType;
+ int length=strlen(str)+2;
+ functionType = new char[length];
+ assert(functionType !=0);
+ strncpy(functionType,str,length);
+ return *this;
+}
+
+int
+FuzzyMember::member_flag(const float& a) const
+{
+ int tmp=0;
+ if (strncmpi(functionType,"leftTriangle",strlen(functionType)-1)==0)
+ {
+ if (a<endPoint)
+ tmp=1;
+ }
+ else if (strncmpi(functionType,"rightTriangle",strlen(functionType)-1)==0)
+ {
+ if (a>startPoint)
+ tmp=1;
+ }
+ else if (strncmpi(functionType,"Triangle",strlen(functionType)-1)==0)
+ { //Triangle
+ if ((a>startPoint)&&(a<endPoint))
+ tmp=1;
+ }
+ else
+ { //Sigmoid and reverSigmoid and Gaussian
+ tmp=1;
+ }
+ return tmp;
+}
+
+FuzzyMember&
+FuzzyMember::change_functionType(const int& a)
+{
+ int length;
+ delete []functionType;
+ switch(a)
+ {
+ case 1:
+ length=strlen("leftTriangle")+2;
+ functionType=new char[length];
+ assert(functionType !=0);
+ strncpy(functionType,"leftTriangle",length);
+ break;
+ case 2:
+ length=strlen("rightTriangle")+2;
+ functionType=new char[length];
+ assert(functionType !=0);
+ strncpy(functionType,"rightTriangle",length);
+ break;
+ case 3:
+ length=strlen("Triangle")+2;
+ functionType=new char[length];
+ assert(functionType !=0);
+ strncpy(functionType,"Triangle",length);
+ break;
+ case 4:
+ length=strlen("Sigmoid")+2;
+ functionType=new char[length];
+ assert(functionType !=0);
+ strncpy(functionType,"Sigmoid",length);
+ break;
+ case 5:
+ length=strlen("reverseSigmoid")+2;
+ functionType=new char[length];
+ assert(functionType !=0);
+ strncpy(functionType,"reverseSigmoid",length);
+ break;
+ case 6:
+ length=strlen("Gaussian")+2;
+ functionType=new char[length];
+ assert(functionType !=0);
+ strncpy(functionType,"Gaussian",length);
+ break;
+ default:
+ cout<<"unknown function type in memberf.cpp, line 187"<<endl;
+ exit(1);
+ }
+ return *this;
+}
+
+int
+FuzzyMember::setTypeFlag() const
+{
+ int tmp;
+ if (strncmpi(functionType,"leftTriangle",strlen(functionType))==0)
+ tmp=1;
+ else if (strncmpi(functionType,"rightTriangle",strlen(functionType))==0)
+ tmp=2;
+ else if (strncmpi(functionType,"Triangle",strlen(functionType))==0)
+ tmp=3;
+ else if (strncmpi(functionType,"Sigmoid",strlen(functionType))==0)
+ tmp=4;
+ else if (strncmpi(functionType,"reverseSigmoid",strlen(functionType))==0)
+ tmp=5;
+ else if (strncmpi(functionType,"Gaussian",strlen(functionType))==0)
+ tmp=6;
+ else
+ tmp=0;
+ return tmp;
+}
+
+float
+FuzzyMember::memberFunction(const float& a) const
+{
+ float tmp;
+ switch(this->setTypeFlag())
+ {
+ case 1:
+ tmp=LeftTriangle(a,startPoint,endPoint);
+ break;
+ case 2:
+ tmp=RightTriangle(a,startPoint,endPoint);
+ break;
+ case 3:
+ tmp=Triangle(a,startPoint,endPoint);
+ break;
+ case 4:
+ tmp=Sigmoid(a,startPoint,endPoint);
+ break;
+ case 5:
+ tmp=reverseSigmoid(a,startPoint,endPoint);
+ break;
+ case 6:
+ tmp=Gaussian(a,startPoint,endPoint);
+ break;
+ default:
+ cout<<"unknown fuzzySet type"<<endl;
+ exit(1);
+ }
+ return tmp;
+}
+
+float
+FuzzyMember::not(const float& a) const
+{
+ float tmp;
+ tmp=this->memberFunction(a);
+ if (tmp>=0.9999)
+ tmp=0.0001;
+ else
+ tmp=1-tmp;
+ return tmp;
+}
+
+
+vector<float>
+FuzzyMember::membership2input(const float& a) const
+{
+ vector<float> tmp(2);
+ switch(this->setTypeFlag())
+ {
+ case 1:
+ tmp=antiLeftTriangle(a,startPoint,endPoint);
+ break;
+ case 2:
+ tmp=antiRightTriangle(a,startPoint,endPoint);
+ break;
+ case 3:
+ tmp=antiTriangle(a,startPoint,endPoint);
+ break;
+ case 4:
+ tmp=antiSigmoid(a,startPoint,endPoint);
+ break;
+ case 5:
+ tmp=antiReverseSigmoid(a,startPoint,endPoint);
+ break;
+ case 6:
+ tmp=antiGaussian(a,startPoint,endPoint);
+ break;
+ default:
+ cout<<"unknown fuzzySet type"<<endl;
+ exit(1);
+ }
+ return tmp;
+}
+
+vector<float>
+FuzzyMember::centroid(const float& a,const float& b) const
+{
+ assert(b !=0);
+ vector<float> res(2);
+ res[0]=0;
+ res[1]=0;
+ int total=(int)((endPoint-startPoint)/b);
+ for (int i=0;i<total;i++)
+ {
+ float tmp1=startPoint+i*b;
+ float tmp2=memberFunction(tmp1);
+ if (tmp2>a)
+ tmp2=a;
+ res[0] +=tmp2*tmp1;
+ res[1] +=tmp2;
+ }
+ return res;
+}
+
+//friend I/O operators
+istream& operator >> (istream& is, FuzzyMember& a)
+{
+ char tmp[256];
+ float b1,b2;
+ is>>tmp>>b1>>b2;
+ a.change_member(b1,b2,tmp);
+ return is;
+}
+
+ostream& operator << (ostream& os, const FuzzyMember& a)
+{
+ os<<"\t"<<a.functionType<<"\t"<<a.startPoint<<"\t"<<a.endPoint;
+ os<<endl;
+ return os;
+}
+
+//local auxiliary function
+float
+LeftTriangle(float a, float firstPoint,float secondPoint)
+{
+ float tmp;
+ if (a>=secondPoint)
+ {
+ fprintf(stderr," %f is outside the LeftTraingle rangle(%f,%f)",a,firstPoint,secondPoint);
+ exit(1);
+ }
+ if (a<=firstPoint)
+ tmp=1.0;
+ else
+ tmp=(secondPoint-a)/(secondPoint-firstPoint);
+ return tmp;
+}
+
+float
+RightTriangle(float a, float firstPoint,float secondPoint)
+{
+ float tmp;
+ if (a<=firstPoint)
+ {
+ fprintf(stderr," %f is outside the rightTraingle rangle(%f,%f)",a,firstPoint,secondPoint);
+ exit(1);
+ }
+ if (a>=secondPoint)
+ tmp=1.0;
+ else
+ tmp=(a-firstPoint)/(secondPoint-firstPoint);
+ return tmp;
+}
+
+float
+Triangle(float a, float firstPoint,float secondPoint)
+{
+ float tmp,med;
+ if ((a<=firstPoint)||(a>=secondPoint))
+ {
+ fprintf(stderr," %f is outside the Traingle rangle(%f,%f)",a,firstPoint,secondPoint);
+ exit(1);
+ }
+ med=(secondPoint-firstPoint)/2;
+ if (med==0)
+ {
+ fprintf(stderr,"Triangle is a line, range (%f,%f)",firstPoint,secondPoint);
+ exit(1);
+ }
+ if (a<=(med+firstPoint))
+ tmp=(a-firstPoint)/med;
+ else
+ tmp=(secondPoint-a)/med;
+ return tmp;
+}
+
+float
+Sigmoid(float a, float firstPoint,float secondPoint)
+{
+ float tmp=((a-firstPoint)*12)/(secondPoint-firstPoint);
+ float result=1.0/(1.0+exp(-tmp+6));
+ if (result<0.00001)
+ result=0.00001;
+ return result;
+}
+
+float
+reverseSigmoid(float a, float firstPoint,float secondPoint)
+{
+ float result=1-Sigmoid(a,firstPoint,secondPoint);
+ if (result<0.00001)
+ result=0.00001;
+ return result;
+}
+
+float
+Gaussian(float a,float firstPoint,float secondPoint)
+{
+ float tmp=((a-firstPoint)*8.0)/(secondPoint-firstPoint) -4;
+ float result=exp(-0.5*tmp*tmp);
+ if (result<0.00001)
+ result=0.00001;
+ return result;
+}
+
+vector<float>
+antiLeftTriangle(float a, float firstPoint,float secondPoint)
+{
+ vector<float> tmp(2);
+ if (a>1||a<0)
+ {
+ fprintf(stderr," %f is outside the LeftTraingle value[0,1]",a);
+ exit(1);
+ }
+ if (a==0) {tmp[0]=tmp[1]=secondPoint; return tmp;}
+ if (a==1) {tmp[0]=tmp[1]=firstPoint; return tmp;}
+ tmp[0]=secondPoint-(a*(secondPoint-firstPoint));
+ tmp[1]=tmp[0];
+ return tmp;
+}
+
+vector<float>
+antiRightTriangle(float a, float firstPoint,float secondPoint)
+{
+ vector<float> tmp(2);
+ if (a>1||a<0)
+ {
+ fprintf(stderr," %f is outside the LeftTraingle value[0,1]",a);
+ exit(1);
+ }
+ if (a==0) {tmp[0]=tmp[1]=firstPoint; return tmp;}
+ if (a==1) {tmp[0]=tmp[1]=secondPoint; return tmp;}
+ tmp[0]=a*(secondPoint-firstPoint) + firstPoint;
+ tmp[1]=tmp[0];
+ return tmp;
+}
+
+vector<float>
+antiTriangle(float a, float firstPoint,float secondPoint)
+{
+ vector<float> tmp(2);
+ if (a>1||a<0)
+ {
+ fprintf(stderr," %f is outside the antiTraingle value[0,1]",a);
+ exit(1);
+ }
+ if (a==0) {tmp[0]=firstPoint;tmp[1]=secondPoint; return tmp;}
+ if (a==1) {tmp[0]=tmp[1]=0.5*(secondPoint-firstPoint)+firstPoint; return tmp;}
+ float med=(secondPoint-firstPoint)/2;
+ tmp[0]=a*med+firstPoint;
+ tmp[1]=secondPoint-a*med;
+ return tmp;
+}
+
+vector<float>
+antiSigmoid(float a, float firstPoint,float secondPoint)
+{
+ vector<float> tmp(2);
+ if (a>1||a<0)
+ {
+ fprintf(stderr," %f is outside the antiSigmoid value[0,1]",a);
+ exit(1);
+ }
+ float tmpfloat;
+ if (a>0.1)
+ {
+ tmpfloat=(1-a)/a;
+ tmpfloat=log(tmpfloat);
+ tmpfloat=6-tmpfloat;
+ }
+ else
+ {
+ tmpfloat=a/(1-a);
+ tmpfloat=log(tmpfloat);
+ tmpfloat=tmpfloat+6;
+ }
+ tmpfloat=(tmpfloat*(secondPoint-firstPoint))/12;
+ tmp[0]=tmpfloat+firstPoint;
+ tmp[1]=tmp[0];
+ return tmp;
+}
+
+vector<float>
+antiReverseSigmoid(float a, float firstPoint,float secondPoint)
+{
+ vector<float> tmp(2);
+ if (a>1||a<0)
+ {
+ fprintf(stderr," %f is outside the antiReverseSigmoid value[0,1]",a);
+ exit(1);
+ }
+ float tmpfloat=1-a;
+ tmp=antiSigmoid(tmpfloat,firstPoint,secondPoint);
+ return tmp;
+}
+
+vector<float>
+antiGaussian(float a,float firstPoint,float secondPoint)
+{
+ vector<float> tmp(2);
+ if (a>1||a<0)
+ {
+ fprintf(stderr," %f is outside the antiGaussian value[0,1]",a);
+ exit(1);
+ }
+ assert(a>0);
+ float tmpfloat=-2.0*log(a);
+ assert(tmpfloat>=0);
+ tmp[1]=sqrt(tmpfloat);
+ tmp[0]=-tmp[1];
+ tmp[0]=tmp[0]+4;
+ tmp[1]=tmp[1]+4;
+ tmp[0]=(tmp[0]*(secondPoint-firstPoint))/8+firstPoint;
+ tmp[1]=(tmp[1]*(secondPoint-firstPoint))/8+firstPoint;
+ return tmp;
+}
+
diff --git a/fu_ga_fs/memberf.h b/fu_ga_fs/memberf.h
new file mode 100644
index 0000000..bbf96da
--- /dev/null
+++ b/fu_ga_fs/memberf.h
@@ -0,0 +1,75 @@
+#ifndef __MEMBERF_H__
+#define __MEMBERF_H__
+
+#include "vector.h"
+#include "mystring.h"
+
+class FuzzyMember
+{
+private:
+ float startPoint;
+ float endPoint;
+ char *functionType;
+
+public:
+ //constructor
+ FuzzyMember():startPoint(0),endPoint(0),functionType(0) {}
+ FuzzyMember(float a,float b, char *str);
+ FuzzyMember(const FuzzyMember& a);
+
+ //destructor
+ ~FuzzyMember(){delete []functionType;}
+
+ //member fuction
+ float memberFunction(const float& a) const;
+ float not(const float& a) const;
+ vector<float> membership2input(const float& a) const ;
+ float get_startPoint() const {return startPoint;}
+ float get_endPoint() const {return endPoint;}
+ FuzzyMember& change_startPoint(const float& a);
+ FuzzyMember& change_endPoint(const float& a);
+ char* get_functionType() const {return functionType;}
+ int member_flag(const float& a) const; //a belong to this member
+ int setTypeFlag() const; //0:unknown 1:leftT 2:rightT 3:T
+ FuzzyMember& change_member(const float& a,const float& b,char *str);
+ FuzzyMember& change_functionType(const int& a);
+ vector<float> centroid(const float& a,const float& b) const;
+
+ //operators
+ FuzzyMember& operator =(const FuzzyMember& a);
+ int operator ==(const FuzzyMember& a) const;
+ int operator < (const FuzzyMember& a) const;
+ //the FuzzyMember is left to a);
+ int operator > (const FuzzyMember& a) const;
+ //the FuzzyMember is right to a);
+
+ //friend operator I/O
+ friend istream& operator >> (istream& is,FuzzyMember& a);
+ friend ostream& operator << (ostream& os,const FuzzyMember& a);
+};
+
+inline FuzzyMember::FuzzyMember(float a,float b,char* str):
+ startPoint(a),endPoint(b)
+{
+ assert(startPoint<=endPoint);
+ int length=strlen(str)+2;
+
+
+ functionType= new char[length];
+ assert(functionType !=0);
+ strncpy(functionType,str,length);
+
+
+}
+
+inline FuzzyMember::FuzzyMember(const FuzzyMember& a):
+ startPoint(a.startPoint),endPoint(a.endPoint)
+{
+ assert(startPoint<=endPoint);
+ int length=strlen(a.functionType)+2;
+
+ functionType= new char[length];
+ assert(functionType !=0);
+ strncpy(functionType,a.functionType,length);
+}
+#endif
diff --git a/fu_ga_fs/misc.h b/fu_ga_fs/misc.h
new file mode 100644
index 0000000..96344a7
--- /dev/null
+++ b/fu_ga_fs/misc.h
@@ -0,0 +1,7 @@
+#ifndef __MISC_H__
+#define __MISC_H__
+
+extern float average(vector<float>);
+extern float variance(vector<float>,float);
+
+#endif
diff --git a/fu_ga_fs/mystring.cpp b/fu_ga_fs/mystring.cpp
new file mode 100644
index 0000000..0edf19f
--- /dev/null
+++ b/fu_ga_fs/mystring.cpp
@@ -0,0 +1,200 @@
+#include "headfile.h"
+#include "mystring.h"
+
+MyString::MyString(int a):
+ stringSize(a),currentPosition(0)
+{
+ stringPtr=new char[stringSize+1];
+ assert(stringPtr !=0);
+}
+
+MyString::MyString(char *str):
+ currentPosition(0)
+{
+ int length=strlen(str);
+ stringSize=length;
+ stringPtr= new char[stringSize+1];
+ assert(stringPtr !=0);
+ strncpy(stringPtr,str,length+1);
+}
+
+MyString::MyString(const MyString& a)
+{
+ stringSize=a.stringSize;
+ currentPosition=a.currentPosition;
+ stringPtr=new char[stringSize+1];
+ assert(stringPtr !=0);
+ strncpy(stringPtr,a.stringPtr,stringSize+1);
+}
+
+//member functions
+MyString&
+MyString::change_stringSize(const int& a)
+{
+ assert(a>=0);
+ stringSize=a;
+ return *this;
+}
+
+MyString&
+MyString::change_currentPosition(const int& a)
+{
+ assert(a>=0&&a<stringSize);
+ currentPosition=a;
+ return *this;
+}
+
+MyString&
+MyString::change_stringContent(char *str)
+{
+ delete []stringPtr;
+ int length=strlen(str);
+ stringSize=length;
+ currentPosition=0;
+ stringPtr=new char[stringSize+1];
+ assert(stringPtr !=0);
+ strncpy(stringPtr,str,length+1);
+
+ return *this;
+}
+
+int
+MyString::findNextF(char ch) const
+{
+ int tmp=-1;
+ for (int i=currentPosition;i<stringSize;i++)
+ {
+ if (stringPtr[i]==ch)
+ {
+ tmp=i;
+ break;
+ }
+ }
+ return tmp;
+}
+
+int
+MyString::findNextB(char ch) const
+{
+ int tmp=-1;
+ for (int i=currentPosition;i>=0;i--)
+ {
+ if (stringPtr[i]==ch)
+ {
+ tmp=i;
+ break;
+ }
+ }
+ return tmp;
+}
+
+int
+MyString::totalNumberF(char ch) const
+{
+ int sum=0;
+ for (int i=currentPosition;i<stringSize;i++)
+ {
+ if (stringPtr[i]==ch)
+ sum++;
+ }
+ return sum;
+}
+
+int
+MyString::totalNumberB(char ch) const
+{
+ int sum=0;
+ for (int i=currentPosition;i>=0;i--)
+ {
+ if (stringPtr[i]==ch)
+ sum++;
+ }
+ return sum;
+}
+
+MyString
+MyString::get_subString(const int& a)
+{
+ assert((currentPosition+a)<=stringSize);
+ MyString substr(a);
+ for (int i=currentPosition;i<(currentPosition+a);i++)
+ substr.stringPtr[i-currentPosition]=stringPtr[i];
+
+ substr.stringPtr[a]='\0';
+ return substr;
+}
+
+//operators
+
+char&
+MyString::operator [] (int i) const
+{
+ assert(i>=0&&i<stringSize);
+ return stringPtr[i];
+}
+
+MyString&
+MyString::operator =(const MyString& a)
+{
+ if ((&a)==this) return *this;
+ delete []stringPtr;
+ stringSize=a.stringSize;
+ currentPosition=a.currentPosition;
+ stringPtr=new char[stringSize+1];
+ assert(stringPtr !=0);
+ strncpy(stringPtr,a.stringPtr,stringSize+1);
+
+ return *this;
+}
+
+int
+MyString::operator ==(const MyString& a) const
+{
+ if ((&a)==this) return 1;
+
+ if (stringSize !=a.stringSize) return 0;
+
+ int tmp=1;
+ for (int i=0;i<stringSize;i++)
+ {
+ if (stringPtr[i] !=a.stringPtr[i])
+ {
+ tmp=0;
+ break;
+ }
+ }
+
+ return tmp;
+}
+
+
+// friend I/O operators
+
+ostream& operator <<(ostream& os,const MyString& a)
+{
+ os<<a.stringPtr<<endl;
+ return os;
+}
+
+istream& operator >>(istream& is,MyString& a)
+{
+ char tmp[256];
+ is>>tmp;
+ int length=strlen(tmp);
+ a.stringSize=length;
+ a.currentPosition=0;
+ delete []a.stringPtr;
+ a.stringPtr=new char[length+1];
+ assert(a.stringPtr !=0);
+ strncpy(a.stringPtr,tmp,length+1);
+
+ return is;
+}
+
+
+
+
+
+
+
+
diff --git a/fu_ga_fs/mystring.h b/fu_ga_fs/mystring.h
new file mode 100644
index 0000000..0a2152f
--- /dev/null
+++ b/fu_ga_fs/mystring.h
@@ -0,0 +1,46 @@
+#ifndef __MYSTRING_H__
+#define __MYSTRING_H__
+
+class MyString
+{
+private:
+ int stringSize;
+ char *stringPtr;
+ int currentPosition;
+
+public:
+ //constructors
+ MyString():stringSize(0),stringPtr(0),currentPosition(0) {}
+ MyString(int a);
+ MyString(char * str);
+ MyString(const MyString& a);
+
+ //destructor
+ ~MyString() {delete []stringPtr;}
+
+ //member functions
+ int get_stringSize() const {return stringSize;}
+ int get_currentPosition() const {return currentPosition;}
+ char* get_stringPtr() const {return stringPtr;}
+
+ MyString& change_stringSize(const int& a);
+ MyString& change_currentPosition(const int& a);
+ MyString& change_stringContent(char *str);
+
+ int findNextF(char ch) const;
+ int findNextB(char ch) const;
+ int totalNumberF(char ch) const;
+ int totalNumberB(char ch) const;
+ MyString get_subString(const int& a); //a: size of subString
+ //from current position
+
+ // operators
+ char& operator [] (int i) const;
+ MyString& operator =(const MyString& a);
+ int operator ==(const MyString& a) const;
+
+ //friend I/O operators
+ friend ostream& operator <<(ostream& os, const MyString& a);
+ friend istream& operator >>(istream& is, MyString& a);
+};
+#endif
diff --git a/fu_ga_fs/popuint.cpp b/fu_ga_fs/popuint.cpp
new file mode 100644
index 0000000..74839e0
--- /dev/null
+++ b/fu_ga_fs/popuint.cpp
@@ -0,0 +1,371 @@
+#include "popuint.h"
+
+int search(int); /* search the second Kid index */
+void reorder(int *,int, int); /* reorder the remain individual indexs */
+
+//constructors
+PopulationInt::PopulationInt(int a,int b):
+length(a),width(b),c_rate(0),m_rate(0)
+{
+ ptr=new IndividualInt[length];
+ assert(ptr !=0);
+}
+
+PopulationInt::PopulationInt(int a,int b,float c=0,float d=0):
+length(a),width(b),c_rate(c),m_rate(d)
+{
+ ptr=new IndividualInt[length];
+ assert(ptr !=0);
+}
+
+PopulationInt::PopulationInt(const PopulationInt& a):
+length(a.length),width(a.width),c_rate(a.c_rate),m_rate(a.m_rate)
+{
+ delete []ptr;
+ ptr= new IndividualInt[length];
+ for (int i=0;i<length;i++)
+ ptr[i]=a.ptr[i];
+}
+
+PopulationInt&
+PopulationInt::change_length(const int& a)
+{
+ assert(a>=0);
+ length = a;
+ delete []ptr;
+ ptr= new IndividualInt[length];
+ assert(ptr !=0);
+ for (int i=0;i<length;i++)
+ ptr[i].change_mrate(m_rate);
+ return *this;
+}
+
+PopulationInt&
+PopulationInt::change_mrate(const float& a)
+{
+ assert(a>=0&&a<=1);
+ m_rate=a;
+ for (int i=0;i<length;i++)
+ ptr[i].change_mrate(a);
+ return *this;
+}
+
+PopulationInt&
+PopulationInt::initialize_range(const IndividualInt& a)
+{
+ time_t t;
+ srand((unsigned) time(&t));
+ if (((width-1)%(a.get_length()-1))!=0)
+ {
+ cerr<<"dimendions doesn't match in initialize_range of popuint.cpp"<<endl;
+ exit(1);
+ }
+ IndividualInt tmp(width,m_rate);
+ for (int i=0;i<length;i++)
+ ptr[i]=tmp.initialize_range(a);
+ return *this;
+}
+
+PopulationInt&
+PopulationInt::initialize_range_RM(const IndividualInt& a)
+{
+ time_t t;
+ srand((unsigned) time(&t));
+
+ IndividualInt tmp(width,m_rate);
+ for (int i=0;i<length;i++)
+ ptr[i]=tmp.initialize_range_RM(a);
+ return *this;
+}
+
+PopulationInt&
+PopulationInt::initialize_range_RMT(const IndividualInt& a)
+{
+ time_t t;
+ srand((unsigned) time(&t));
+
+ IndividualInt tmp(width,m_rate);
+ for (int i=0;i<length;i++)
+ ptr[i]=tmp.initialize_range_RMT(a);
+ return *this;
+}
+
+PopulationInt&
+PopulationInt::mutate_one(const IndividualInt& a,const int& b)
+{
+ time_t t;
+ srand((unsigned) time(&t));
+
+ for (int i=0;i<length;i++)
+ {
+ if (i !=b)
+ ptr[i].mutate_one(a);
+ }
+ return *this;
+}
+
+PopulationInt&
+PopulationInt::mutate_one_RM(const IndividualInt& a,const int& b)
+{
+ time_t t;
+ srand((unsigned) time(&t));
+
+ for (int i=0;i<length;i++)
+ {
+ if (i !=b)
+ ptr[i].mutate_one_RM(a);
+ }
+ return *this;
+}
+
+PopulationInt&
+PopulationInt::mutate_one_RMT(const IndividualInt& a,const int& b)
+{
+ time_t t;
+ srand((unsigned) time(&t));
+
+ for (int i=0;i<length;i++)
+ {
+ if (i !=b)
+ ptr[i].mutate_one_RMT(a);
+ }
+ return *this;
+}
+
+PopulationInt&
+PopulationInt::crossover(const int& a,const int& b)
+{
+ time_t t;
+ srand((unsigned) time(&t));
+
+ int *sg; /* sg[popsize]; to store the flag of beibg chosen or not */
+ sg=new int[length];
+ /* initialization */
+ /* at the begining, all have not been selected */
+ /* sg[i] store i, and rno=popsize */
+ for (int i=0;i<length;i++)
+ sg[i]=i;
+ int rno=length; //rno: no. of remaining unchosen individuals
+ int k1,k2; //k1: first chosen kid k2: second chosen kid
+
+ /* begin crossover among population */
+ for (i=0;i<(length/2+1);i++)
+ {
+ if (rno>=2) /* at least two indivuals remain unchosen */
+ {
+ //chosen two individuals to be crossovered
+ int j=search(rno); /* find first kid */
+ k1=sg[j];
+ rno=rno-1;
+ reorder(sg,rno,j); /* reorder the sign vector */
+ j=search(rno); /* find second kid */
+ k2=sg[j];
+ rno=rno-1;
+ reorder(sg,rno,j); /* reorder the sign vector */
+ float prob=(1.0*random(1000))/1000;
+ if (prob<=c_rate) /* probability for crossover */
+ {
+ if ((k1 !=b)&&(k2 !=b))
+ {
+ switch(a)
+ {
+ case 0:
+ crossoverUniform(ptr[k1],ptr[k2]);
+ break;
+ case 1:
+ crossoverOP(ptr[k1],ptr[k2]);
+ break;
+ default:
+ crossoverTP(ptr[k1],ptr[k2]);
+ }
+ }
+ }
+ }
+ }
+ delete[]sg;
+ return *this;
+}
+
+PopulationInt&
+PopulationInt::selection(const vector<float>& a,const int& b,const int& c)
+ //a: fitness vector b: best indi index c: shift flag (1: yes, 0: no)
+{
+ time_t t;
+ srand((unsigned) time(&t));
+
+ int i,j,k;
+ float *Prob; //probability vector of indi to be selected
+ Prob=new float[length];
+
+ int* flag; //information for selected times
+ flag=new int[length];
+
+ int no;
+ float mini,remain;
+
+ /* shift the fitness */
+ /* find the minimial fitness */
+ if (c==1)
+ {
+ mini=a.minimum();
+ if (mini<0.1)
+ remain=mini;
+ else
+ remain=0.1;
+
+ for (i=0;i<length;i++)
+ a[i]=a[i]-mini+remain;
+ }
+ /* calculation the sum of the fitness */
+ float Sum=0.0;
+ for (i=0;i<length;i++)
+ Sum+=a[i];
+ if (Sum==0)
+ return *this;
+
+ /* calculate the probability vector of individuals */
+ float SumProb=0.0;
+ for (i=0;i<length;i++)
+ {
+ Prob[i]=a[i]/Sum+SumProb;
+ SumProb+=a[i]/Sum;
+ }
+
+ /* generate new population */
+
+ /* set all flags to be zero, means no one has been selected */
+ for (i=0;i<length;i++)
+ flag[i]=0;
+
+ flag[b]=1; /* keep the best */
+ /* set the flags for all individuals */
+ for (i=0;i<(length-1);i++)
+ {
+ /* turn wheel to see which individual is selected */
+ float prob=(1.0*random(1000))/1000;
+ k=0;
+ for (j=0;j<length;j++)
+ {
+ if (prob>=Prob[j])
+ k=j+1;
+ }
+ flag[k]+=1;
+ }
+ /* form the new population */
+ for (i=0;i<length;i++)
+ {
+ if (flag[i]==0)
+ {
+ no=0;
+ for (j=0;j<length;j++)
+ {
+ if (flag[j]>1)
+ {
+ k=j;
+ no=no+1;
+ break;
+ }
+ }
+ if (no==0)
+ {
+ printf("something wrong in selection \n");
+ exit(1);
+ }
+ flag[k]=flag[k]-1;
+ //copy the selected individual to new individual
+ ptr[i]=ptr[k];
+
+ //get the flag for the new individual
+ flag[i] +=1;
+ }
+ }
+
+ /* check the selection */
+ for (i=0;i<length;i++)
+ {
+ if (flag[i]!=1)
+ {
+ printf("something wrong with selection \n");
+ exit(1);
+ }
+ }
+ delete []Prob;
+ delete []flag;
+ return *this;
+}
+
+vector<float>
+PopulationInt::fitness(const FuzzyRule& a,const array& b,const vector<int>& cn,const int& c,const int& d, const int& e) const
+{
+ vector<float> tmpvec(length);
+ for (int i=0;i<length;i++)
+ tmpvec[i]=ptr[i].fitness(a,b,cn,c,d,e);
+ return tmpvec;
+}
+
+vector<float>
+PopulationInt::fitness_RM(const FuzzyRule& a,const array& b,const vector<int>& cn,const int& c,const int& d, const int& e,const IndividualInt& f) const
+{
+ vector<float> tmpvec(length);
+ for (int i=0;i<length;i++)
+ tmpvec[i]=ptr[i].fitness_RM(a,b,cn,c,d,e,f);
+ return tmpvec;
+}
+
+vector<float>
+PopulationInt::fitness_RMT(const FuzzyRule& a,const array& b,const vector<int>& cn,const int& c,const int& d, const int& e,const IndividualInt& f) const
+{
+ vector<float> tmpvec(length);
+ for (int i=0;i<length;i++)
+ tmpvec[i]=ptr[i].fitness_RMT(a,b,cn,c,d,e,f);
+ return tmpvec;
+}
+
+//I/O operators
+ostream& operator<<(ostream& os,const PopulationInt& a)
+{
+ os<<a.length<<"\t"<<a.width<<"\t"<<a.c_rate<<"\t"<<a.m_rate<<endl;
+ for (int i=0;i<a.length;i++)
+ os<<a.ptr[i];
+ return os;
+}
+
+istream& operator>>(istream& is,PopulationInt& a)
+{
+ is>>a.length>>a.width>>a.c_rate>>a.m_rate;
+ if (a.ptr !=0)
+ delete []a.ptr;
+
+ a.ptr =new IndividualInt[a.length];
+ assert(a.ptr !=0);
+ IndividualInt tmp(a.width,a.m_rate);
+ for (int i=0;i<a.length;i++)
+ {
+ a.ptr[i]=tmp;
+ is>>a.ptr[i];
+ }
+ return is;
+}
+
+int
+search(int si) /* number of unchosen individuals */
+{
+ int re;
+ re=random(si);
+ return(re);
+}
+
+void
+reorder(int *vec,int si,int ind)
+/* int *vec; pointer to the sign vector */
+/* int si; number of unchosen individuals */
+/* int ind; selected and need to be removed */
+{
+ int i;
+ if (ind<si)
+ {
+ for (i=ind;i<si;i++)
+ *(vec+i)=*(vec+i+1);
+ }
+}
+
diff --git a/fu_ga_fs/popuint.h b/fu_ga_fs/popuint.h
new file mode 100644
index 0000000..3b7766e
--- /dev/null
+++ b/fu_ga_fs/popuint.h
@@ -0,0 +1,82 @@
+#ifndef __POPUINT_H__
+#define __POPUINT_H__
+#include "headfile.h"
+
+#include "chromint.h"
+
+template <class Type>
+class vector;
+
+class PopulationInt
+{
+private:
+ int length; //population size
+ int width; //individual length
+ IndividualInt* ptr; //pointer to the individual
+ float c_rate; //crossover rate
+ float m_rate; //mutation rate
+
+public:
+ PopulationInt():length(0),width(0),ptr(0),c_rate(0),m_rate(0) {}
+ PopulationInt(int a,int b);
+ PopulationInt(int a,int b,float c,float d);
+ PopulationInt(const PopulationInt& a);
+
+ //member function
+ int get_length() const {return length;}
+ int get_width() const {return width;}
+ float get_crate() const {return c_rate;}
+ float get_mrate() const {return m_rate;}
+
+ PopulationInt& change_length(const int& a);
+ PopulationInt& change_width(const int& a)
+ {assert(a>=0); width=a; return *this; }
+ PopulationInt& change_crate(const float& a)
+ {assert(a>=0&&a<=1); c_rate=a; return *this; }
+ PopulationInt& change_mrate(const float& a);
+ PopulationInt& initialize_range(const IndividualInt& a);
+ //a: rule range, for evolving rule set only
+ PopulationInt& initialize_range_RM(const IndividualInt& a);
+ //a: rule range, for evolving rule set and tuning membership functions
+ PopulationInt& initialize_range_RMT(const IndividualInt& a);
+ //a: rule range, for evolving rule set and tuning membership functions
+ PopulationInt& mutate_one(const IndividualInt& a,const int& b);
+ //a: rule range b: best fitness index
+ //for rule set only
+ PopulationInt& mutate_one_RM(const IndividualInt& a,const int& b);
+ //a: rule range b: best fitness index
+ //for rule set and membership functions
+ PopulationInt& mutate_one_RMT(const IndividualInt& a,const int& b);
+ //a: rule range b: best fitness index
+ //for rule set and membership functions and type
+ PopulationInt& crossover(const int& a, const int& b); //a: crossover flag
+ //0:uniform 1:one point 2: two point b: best individual index
+ PopulationInt& selection(const vector<float>& a,const int& b,const int& c);
+ //a: fitness vector b: best indi index c: shift flag (1: yes, 0: no)
+ vector<float> fitness(const FuzzyRule& a,const array& b,const vector<int>& cn,const int& c,const int& d,const int&e) const;
+ //a:base rule, b:input array c:ruleEffectFlag
+ //d:fuzzyFlag e:defuzzyFlag cn:class no. for output
+ //for evolving rule set only
+ vector<float> fitness_RM(const FuzzyRule& a,const array& b,const vector<int>& cn,const int& c,const int& d,const int& e,const IndividualInt& f) const;
+ //a:base rule, b:input array c:ruleEffectFlag
+ //d:fuzzyFlag e:defuzzyFlag cn:class no. for output
+ //f: range individual
+ //for evolving rule set and membership functions
+ vector<float> fitness_RMT(const FuzzyRule& a,const array& b,const vector<int>& cn,const int& c,const int& d,const int& e,const IndividualInt& f) const;
+ //a:base rule, b:input array c:ruleEffectFlag
+ //d:fuzzyFlag e:defuzzyFlag cn:class no. for output
+ //f: range individual
+ //for evolving rule set and membership functions
+
+ //operators
+ IndividualInt& operator [] (int i) const
+ {assert(i>=0&&i<length); return ptr[i];}
+
+ //I/O operators
+ friend ostream& operator<<(ostream& os,const PopulationInt& a);
+ friend istream& operator>>(istream& is,PopulationInt& a);
+};
+#endif
+
+
+
diff --git a/fu_ga_fs/ruleset.cpp b/fu_ga_fs/ruleset.cpp
new file mode 100644
index 0000000..ca06be9
--- /dev/null
+++ b/fu_ga_fs/ruleset.cpp
@@ -0,0 +1,295 @@
+#include "headfile.h"
+#include "ruleset.h"
+
+//constructors
+FuzzyRuleSet::FuzzyRuleSet(int a):
+ ruleSetSize(a)
+{
+ rules = new FuzzyRule[ruleSetSize];
+ assert(rules !=0);
+}
+
+FuzzyRuleSet::FuzzyRuleSet(int a,FuzzyRule *b):
+ruleSetSize(a)
+{
+ rules=new FuzzyRule[ruleSetSize];
+ assert(rules !=0);
+ for (int i=0;i<ruleSetSize;i++)
+ rules[i]=b[i];
+}
+
+FuzzyRuleSet::FuzzyRuleSet(const FuzzyRuleSet& a):
+ruleSetSize(a.ruleSetSize)
+{
+ rules =new FuzzyRule[ruleSetSize];
+ assert(rules !=0);
+ for (int i=0;i<ruleSetSize;i++)
+ rules[i]=a.rules[i];
+}
+
+//member functions
+FuzzyRuleSet&
+FuzzyRuleSet::addRuleB(const FuzzyRule& a,const int& b)
+{
+ FuzzyRuleSet newSet(ruleSetSize,rules);
+ ruleSetSize++;
+ delete []rules;
+ rules =new FuzzyRule[ruleSetSize];
+ assert(rules !=0);
+ for (int i=0;i<b;i++)
+ rules[i]=newSet[i];
+ rules[b]=a;
+ for (i=b;i<(ruleSetSize-1);i++)
+ rules[i+1]=newSet[i];
+ return *this;
+}
+
+FuzzyRuleSet&
+FuzzyRuleSet::addRule(const FuzzyRule& a)
+{
+ FuzzyRuleSet newSet(ruleSetSize,rules);
+ delete []rules;
+ rules =new FuzzyRule[ruleSetSize+1];
+ assert(rules !=0);
+ for (int i=0;i<ruleSetSize;i++)
+ rules[i]=newSet[i];
+ rules[ruleSetSize]=a;
+ ruleSetSize++;
+ return *this;
+}
+
+FuzzyRuleSet&
+FuzzyRuleSet::deleteRule(const int& a)
+{
+ assert((a>=0)&&(a<ruleSetSize));
+ FuzzyRuleSet newSet(ruleSetSize,rules);
+ delete []rules;
+ ruleSetSize--;
+ rules =new FuzzyRule[ruleSetSize];
+ assert(rules !=0);
+ for (int i=0;i<a;i++)
+ rules[i]=newSet[i];
+ for (i=a;i<ruleSetSize;i++)
+ rules[i]=newSet[i+1];
+ return *this;
+}
+
+//a:input vector,
+//b: mode for antecedent--0:min 1:aver, c: mode for defuzzyfy--0:max 1:centroid without overlap
+//2: with overlap; output_add: add the contribution from each rule
+
+vector< vector<float> >
+FuzzyRuleSet::fuzzyOutputValue_max(const vector<float>& a, const int& b) const
+{
+ if (a.len() !=rules[0].get_variableSize())
+ {
+ fprintf(stderr,"input dim doesn't match the inputVariable no. of the rule");
+ exit(1);
+ }
+ int outVarDim=rules[0].get_outputSize(); //outputVariable no. in rules
+ vector< vector<float> > result(outVarDim);
+ vector<int> varDim(outVarDim);
+ for (int i=0;i<outVarDim;i++)
+ {
+ varDim[i]=rules[0].get_outputVariable(i).get_setSize();
+ result[i].changeSize(varDim[i]);
+ }
+
+ //initialization of result
+ for (i=0;i<outVarDim;i++)
+ for (int j=0;j<varDim[i];j++)
+ result[i][j]=0.0;
+
+ vector<float> tmpres(outVarDim);
+ for (i=0;i<ruleSetSize;i++)
+ {
+ int ter=rules[i].checkRuleActive(a);
+ if (ter==1)
+ {
+ vector<int> tmpvec=rules[i].get_outputSetFlag();
+ if (b==1)
+ tmpres=rules[i].FuzzyOutput_average(a);
+ else
+ tmpres=rules[i].FuzzyOutput(a);
+
+ for (int j=0;j<outVarDim;j++)
+ {
+ if (tmpvec[j] !=0)
+ result[j][abs(tmpvec[j])-1]=max(result[j][abs(tmpvec[j])-1],tmpres[j]);
+ }
+ }
+ }
+ return result;
+}
+
+vector< vector<float> >
+FuzzyRuleSet::fuzzyOutputValue_add(const vector<float>& a, const int& b) const
+{
+ if (a.len() !=rules[0].get_variableSize())
+ {
+ fprintf(stderr,"input dim doesn't match the inputVariable no. of the rule");
+ exit(1);
+ }
+ int outVarDim=rules[0].get_outputSize(); //outputVariable no. in rules
+ vector< vector<float> > result(outVarDim);
+ vector<int> varDim(outVarDim);
+ for (int i=0;i<outVarDim;i++)
+ {
+ varDim[i]=rules[0].get_outputVariable(i).get_setSize();
+ result[i].changeSize(varDim[i]);
+ }
+ //initialization of result
+ for (i=0;i<outVarDim;i++)
+ for (int j=0;j<varDim[i];j++)
+ result[i][j]=0.0;
+
+ vector<float> tmpres(outVarDim);
+ for (i=0;i<ruleSetSize;i++)
+ {
+ int ter=rules[i].checkRuleActive(a);
+ if (ter==1)
+ {
+ vector<int> tmpvec=rules[i].get_outputSetFlag();
+ if (b==1)
+ tmpres=rules[i].FuzzyOutput_average(a);
+ else
+ tmpres=rules[i].FuzzyOutput(a);
+
+ for (int j=0;j<outVarDim;j++)
+ {
+ if (tmpvec[j] !=0)
+ {
+ result[j][abs(tmpvec[j])-1] +=tmpres[j];
+ if ((result[j][abs(tmpvec[j])-1])>1)
+ result[j][abs(tmpvec[j])-1]=1;
+ }
+ }
+ }
+ }
+ return result;
+}
+
+vector<float>
+FuzzyRuleSet::defuzzify(const vector< vector<float> >& a,const int& b) const
+{
+ //get output variables in a rule
+ int outVarDim=rules[0].get_outputSize();
+ vector<float> tmp(outVarDim);
+ vector<int> varDim(outVarDim);
+ for (int i=0;i<outVarDim;i++)
+ {
+ //fuzzy set no.`in output variable i
+ varDim[i]=this->get_outputVariable(i).get_setSize();
+ //defuzzify for output variable i
+ if (b==0)
+ tmp[i]=this->get_outputVariable(i).defuzzifyMax(varDim[i],a[i]);
+ else if (b==1)
+ tmp[i]=this->get_outputVariable(i).defuzzyCentroid(varDim[i],a[i]);
+ else
+ tmp[i]=this->get_outputVariable(i).defuzzyCentroid_add(varDim[i],a[i]);
+ }
+ return tmp;
+}
+
+FuzzyVariable&
+FuzzyRuleSet::get_outputVariable(const int& a) const
+{
+ assert (a>=0&&a<rules[0].get_outputSize());
+ return rules[0].get_outputVariable(a);
+}
+
+int
+FuzzyRuleSet::checkRuleSetFired(const vector<float>& a) const
+{
+ assert(ruleSetSize);
+ assert(a.len()==rules[0].get_variableSize());
+ int sum=0;
+ for (int i=0;i<ruleSetSize;i++)
+ sum +=rules[i].checkRuleActive(a);
+ int tmp=0;
+ if (sum>0)
+ tmp=1;
+ return tmp;
+}
+
+vector<float>
+FuzzyRuleSet::output(const vector<float>& a, const int& b,const int& c,const int& d) const
+{ //a: input b:add/max c:min/aver d:max/without/with overlap
+ // return the value after defuzzify
+ if (a.len() !=rules[0].get_variableSize())
+ {
+ fprintf(stderr,"input dim doesn't match the inputVariable no. of the rule");
+ exit(1);
+ }
+ int outVarDim=rules[0].get_outputSize(); //outputVariable no. in rules
+ vector< vector<float> > result(outVarDim);
+ vector<int> varDim(outVarDim);
+ for (int i=0;i<outVarDim;i++)
+ {
+ varDim[i]=rules[0].get_outputVariable(i).get_setSize();
+ result[i].changeSize(varDim[i]);
+ } //allocate memory for result
+ if (b==1)
+ result=this->fuzzyOutputValue_max(a,c);
+ else
+ result= this->fuzzyOutputValue_add(a,c);
+ vector<float> tmp(outVarDim);
+ tmp=this->defuzzify(result,d);
+ return tmp;
+}
+
+//operators
+FuzzyRule&
+FuzzyRuleSet::operator [] (int i) const
+{
+ assert((i>=0)&&(i<ruleSetSize));
+ return rules[i];
+}
+
+FuzzyRuleSet&
+FuzzyRuleSet::operator =(const FuzzyRuleSet& a)
+{
+ if ((&a)==this) return *this;
+ delete []rules;
+ ruleSetSize=a.ruleSetSize;
+ rules =new FuzzyRule[ruleSetSize];
+ assert(rules !=0);
+ for (int i=0;i<ruleSetSize;i++)
+ rules[i]=a[i];
+ return *this;
+}
+
+//I/O operators
+istream& operator>>(istream& is, FuzzyRuleSet& a)
+{
+ is>>a.ruleSetSize;
+ if (a.rules !=0)
+ delete []a.rules;
+ a.rules =new FuzzyRule[a.ruleSetSize];
+ is>>a.rules[0];
+ vector<int> vecin(a.rules[0].get_variableSize());
+ vector<int> vecout(a.rules[0].get_outputSize());
+ for (int i=1;i<a.ruleSetSize;i++)
+ {
+ a.rules[i]=a.rules[0];
+ is>>vecin;
+ a.rules[i].change_inputSetFlag(vecin);
+ is>>vecout;
+ a.rules[i].change_outputSetFlag(vecout);
+ a.rules[i].form_ruleContent();
+ }
+ return is;
+}
+
+ostream& operator<<(ostream& os, const FuzzyRuleSet& a)
+{
+ assert(a.ruleSetSize !=0);
+ os<<a.ruleSetSize<<endl;
+ os<<a[0];
+ for (int i=1;i<a.ruleSetSize;i++)
+ os<<(a[i].get_inputSetFlag()|a[i].get_outputSetFlag());
+ return os;
+}
+
+
+
diff --git a/fu_ga_fs/ruleset.h b/fu_ga_fs/ruleset.h
new file mode 100644
index 0000000..9915ab2
--- /dev/null
+++ b/fu_ga_fs/ruleset.h
@@ -0,0 +1,54 @@
+#ifndef __RULESET_H__
+#define __RULESET_H__
+
+#include "fuzzyrul.h"
+#include "variablf.h"
+#include "memberf.h"
+#include "vector.h"
+
+class FuzzyRuleSet
+{
+private:
+ int ruleSetSize; //how many rules in the set
+ FuzzyRule* rules; //pointers to the fuzzy rule set
+
+public:
+ FuzzyRuleSet():ruleSetSize(0),rules(0) {}
+ FuzzyRuleSet(int a);
+ FuzzyRuleSet(int a, FuzzyRule *b);
+ FuzzyRuleSet(const FuzzyRuleSet& a);
+
+ ~FuzzyRuleSet() {delete []rules;}
+
+ //member functions
+ int get_ruleSetSize() const {return ruleSetSize;}
+ FuzzyRuleSet& addRuleB(const FuzzyRule& a,const int& b); //add rule a at position b
+ FuzzyRuleSet& addRule(const FuzzyRule& a); //add rule a at the end of set
+ FuzzyRuleSet& deleteRule(const int& a); //delete the 'a'th rule
+ vector< vector<float> > fuzzyOutputValue_max(const vector<float>& a, const int& b) const;
+ vector< vector<float> > fuzzyOutputValue_add(const vector<float>& a, const int& b) const;
+ //a:input vector,
+ //b: mode for antecedent--0:min 1:aver,
+ vector<float> defuzzify(const vector< vector<float> >& a,const int& b) const;
+ //b: mode for defuzzyfy--0:max 1:centroid without overlap
+ //2: with overlap;
+ // a: fuzzy output values (for each fuzzy set of each output variable
+ vector<float> output(const vector<float>& a, const int& b,const int& c,const int& d) const;
+ //a: input b:add/max c:min/aver d:max/without/with overlap
+ // return the value after defuzzify
+ FuzzyVariable& get_outputVariable(const int& a) const;
+ int checkRuleSetFired(const vector<float>& a) const;
+ //check this rule set is fired or not due to a
+ vector<float> fuzziness(const int& in,const int& on,const int& a,const int& b, const int& c) const;
+ //in: input no. on:output no. a:add/max b:min/aver c:max/wo/with overlap
+
+ //operators
+ FuzzyRule& operator [](int i) const;
+ FuzzyRuleSet& operator =(const FuzzyRuleSet& a);
+
+ //I/O operators
+ friend istream& operator>>(istream& is, FuzzyRuleSet& a);
+ friend ostream& operator<<(ostream& os, const FuzzyRuleSet& a);
+};
+
+#endif
diff --git a/fu_ga_fs/variablf.cpp b/fu_ga_fs/variablf.cpp
new file mode 100644
index 0000000..225be98
--- /dev/null
+++ b/fu_ga_fs/variablf.cpp
@@ -0,0 +1,466 @@
+#include "headfile.h"
+
+#include "memberf.h"
+#include "variablf.h"
+#include "vector.h"
+
+//constructors
+FuzzyVariable::FuzzyVariable(int a,float b,float c):
+setSize(a),startPoint(b),endPoint(c)
+{
+ fuzzySet= new FuzzyMember[setSize];
+ assert(fuzzySet !=0);
+ char *string="default";
+ int length=strlen(string)+2;
+ variableName= new char[length];
+ assert(variableName !=0);
+ strncpy(variableName,string,length);
+}
+
+FuzzyVariable::FuzzyVariable(int a,float b,float c,char *str):
+setSize(a),startPoint(b),endPoint(c)
+{
+ int length=strlen(str)+2;
+ variableName= new char[length];
+ assert(variableName !=0);
+ strncpy(variableName,str,length);
+ fuzzySet= new FuzzyMember[setSize];
+ assert(fuzzySet !=0);
+}
+
+FuzzyVariable::FuzzyVariable(const FuzzyVariable& a):
+setSize(a.setSize),startPoint(a.startPoint),endPoint(a.endPoint)
+{
+ fuzzySet= new FuzzyMember[setSize];
+ assert(fuzzySet !=0);
+ for (int i=0;i<setSize;i++)
+ fuzzySet[i]=a.fuzzySet[i];
+ int length=strlen(a.variableName)+2;
+ variableName= new char[length];
+ assert(variableName !=0);
+ strncpy(variableName,a.variableName,length);
+}
+
+//member functions
+FuzzyVariable&
+FuzzyVariable::standardVariable()
+{
+ float stepSize=(endPoint-startPoint)/(2*setSize);
+ delete []fuzzySet;
+ fuzzySet=new FuzzyMember[setSize];
+ assert(fuzzySet !=0);
+ FuzzyMember first(startPoint,startPoint+2*stepSize,"leftTriangle");
+ //most left rule
+ fuzzySet[0]=first;
+ //most right rule
+ first.change_member(endPoint-2*stepSize,endPoint,"rightTriangle");
+ fuzzySet[setSize-1]=first;
+ //rules inside
+ for (int i=1;i<(setSize-1);i++)
+ {
+ first.change_member(startPoint+(2*i-1)*stepSize,startPoint+(2*i+3)*stepSize,"Triangle");
+ fuzzySet[i]=first;
+ }
+ return *this;
+}
+
+FuzzyVariable&
+FuzzyVariable::change_setSize(const int& a)
+{
+ setSize=a;
+ return *this;
+}
+
+FuzzyVariable&
+FuzzyVariable::change_startPoint(const float& a)
+{
+ startPoint=a;
+ return *this;
+}
+
+FuzzyVariable&
+FuzzyVariable::change_endPoint(const float& a)
+{
+ endPoint=a;
+ return *this;
+}
+
+FuzzyVariable&
+FuzzyVariable::change_variableName(char *str)
+{
+ delete []variableName;
+ int length=strlen(str)+2;
+ variableName=new char[length];
+ strncpy(variableName,str,length);
+ return *this;
+}
+
+char*
+FuzzyVariable::setMeaning(const int& a,const int& b) const
+{
+ if (a!=setSize)
+ {
+ fprintf(stderr,"wrong setSize in member setMenaing");
+ exit(1);
+ }
+
+ char str[32];
+ if (a==3) //three sets for the variable
+ {
+ switch(b)
+ {
+ case 0:
+ strcpy(str,"Low");
+ break;
+ case 1:
+ strcpy(str,"Medium");
+ break;
+ default:
+ strcpy(str,"High");
+ }
+ }
+ else if (a==5)
+ {
+ switch(b)
+ {
+ case 0:
+ strcpy(str,"veryLow");
+ break;
+ case 1:
+ strcpy(str,"Low");
+ break;
+ case 2:
+ strcpy(str,"Medium");
+ break;
+ case 3:
+ strcpy(str,"High");
+ break;
+ default:
+ strcpy(str,"veryHigh");
+ }
+ }
+ else if (a==7)
+ {
+ switch(b)
+ {
+ case 0:
+ strcpy(str,"veryLow");
+ break;
+ case 1:
+ strcpy(str,"Low");
+ break;
+ case 2:
+ strcpy(str,"littleLow");
+ break;
+ case 3:
+ strcpy(str,"Medium");
+ break;
+ case 4:
+ strcpy(str,"littleHigh");
+ break;
+ case 5:
+ strcpy(str,"High");
+ break;
+ default:
+ strcpy(str,"veryHigh");
+ }
+ }
+ else if (a==9)
+ {
+ switch(b)
+ {
+ case 0:
+ strcpy(str,"extremeLow");
+ break;
+ case 1:
+ strcpy(str,"veryLow");
+ break;
+ case 2:
+ strcpy(str,"Low");
+ break;
+ case 3:
+ strcpy(str,"littleLow");
+ break;
+ case 4:
+ strcpy(str,"Medium");
+ break;
+ case 5:
+ strcpy(str,"littleHigh");
+ break;
+ case 6:
+ strcpy(str,"High");
+ break;
+ case 7:
+ strcpy(str,"veryHigh");
+ break;
+ default:
+ strcpy(str,"extremeHigh");
+ }
+ }
+ else
+ {
+ if (b>=10)
+ {
+ int temdec=b/10;
+ temdec=temdec+48;
+ strcpy(str,(char*)&temdec);
+ int tmp=b%10 +48;
+ strcat(str,(char*)&tmp);
+ }
+ else
+ {
+ int tmp=b+48;
+ strcpy(str,(char*)&tmp);
+ }
+ strcat(str,"th");
+ strcat(str,"Part");
+ }
+ char* tmp=str;
+ return tmp;
+}
+
+vector<int>
+FuzzyVariable::setFireFlag(const float& a) const
+ {
+ vector<int> newVec(setSize);
+ for (int i=0;i<setSize;i++)
+ newVec[i]=fuzzySet[i].member_flag(a);
+ return newVec;
+}
+
+float
+FuzzyVariable::output(const float& a, const int& b) const
+{
+ assert(b !=0);
+ vector<int> vec=this->setFireFlag(a);
+ if (vec[abs(b)-1]==0)
+ {
+ fprintf(stderr,"try to fire a unfired set");
+ exit(1);
+ }
+ float tmp;
+ if (b>0)
+ tmp=this->fuzzySet[b-1].memberFunction(a);
+ else
+ tmp=this->fuzzySet[abs(b)-1].not(a);
+ return tmp;
+}
+
+float
+FuzzyVariable::output_max(const float& a) const //return maximum membership value when input a
+{
+ vector<int> vecint(setSize); //store setFireFlag
+ vector<float> vecfloat(setSize); //store membership value
+ vecint=this->setFireFlag(a); //get setFireFlag
+ for (int j=0;j<setSize;j++)
+ {
+ if (vecint[j]==1) //this set fired?
+ {
+ vecfloat[j]=fuzzySet[j].memberFunction(a);
+ if (vecfloat[j]==0.0)
+ cout<<"output =0 in variablf.cpp, 281"<<endl; //test only
+ }
+ else
+ vecfloat[j]=0.0; //membership value
+ }
+ int ind=vecfloat.maximum_index(); //maximum index
+ return vecfloat[ind];
+}
+
+float
+FuzzyVariable::defuzzifyMax(const int& a,const vector<float>& b) const
+{
+ assert((a==setSize)&&(a==b.len()));
+ //check whther b is zero vector
+ int sum=0;
+ for (int i=0;i<b.len();i++)
+ {
+ if (b[i]>0)
+ sum++;
+ }
+ if (sum==0)
+ {
+ cerr<<"try to defuzzy while no rule fired in defuzzifyMax in variablf.cpp"<<endl;
+ exit(1);
+ }
+ int ind=b.maximum_index();
+ float first=fuzzySet[ind].get_startPoint();
+ float second=fuzzySet[ind].get_endPoint();
+ return (first+(second-first)/2);
+}
+
+int
+FuzzyVariable::defuzzyMax_index(const int&a, const vector<float>& b) const
+{
+ assert((a==setSize)&&(a==b.len()));
+ int sum=0;
+ for (int i=0;i<b.len();i++)
+ {
+ if (b[i]>0)
+ sum++;
+ }
+ if (sum==0)
+ {
+ cerr<<"try to defuzzy while no rule fired in defuzzyMax_index in variablf.cpp"<<endl;
+ exit(1);
+ }
+ return b.maximum_index();
+}
+
+float
+FuzzyVariable::defuzzyCentroid_add(const int& a,const vector<float>& b) const
+{
+ assert((a==setSize)&&(a==b.len()));
+ //check whether b is zero vector
+ int sum=0;
+ for (int i=0;i<b.len();i++)
+ {
+ if (b[i]>0)
+ sum++;
+ }
+ if (sum==0)
+ {
+ cerr<<"try to defuzzy while no rule fired in defuzzyCentroid_add in variablf.cpp"<<endl;
+ exit(1);
+ }
+ float stepsize=(endPoint-startPoint)/100; //divide into 100 intervals
+ vector<float> res(2); //first:nom second: dem
+ res[0]=0;
+ res[1]=0;
+ for (i=0;i<setSize;i++)
+ {
+ if (b[i] !=0.0)
+ res +=fuzzySet[i].centroid(b[i],stepsize);
+ }
+ //maybe should add the left and right shoulder's affect
+ int totalstep;
+ if (b[0] !=0)
+ {
+ if ((strncmp(fuzzySet[0].get_functionType(),"leftTriangle",strlen("leftTriangle")-1)==1)||(strncmp(fuzzySet[0].get_functionType(),"reverseSigmoid",strlen("reverseSigmoid")-1)==1))
+ {
+ totalstep=(fuzzySet[0].get_startPoint()-startPoint)/stepsize;
+ assert(totalstep>=0);
+ for (i=0;i<totalstep;i++)
+ {
+ res[0] +=(startPoint+i*stepsize)*b[0];
+ res[1] +=b[0];
+ }
+ }
+ }
+ if (b[setSize-1] !=0)
+ {
+ if ((strncmp(fuzzySet[setSize-1].get_functionType(),"rightTriangle",strlen("rightTriangle")-1)==1)||(strncmp(fuzzySet[setSize-1].get_functionType(),"Sigmoid",strlen("Sigmoid")-1)==1))
+ {
+ float tmp=fuzzySet[setSize-1].get_endPoint();
+ totalstep=(endPoint-tmp)/stepsize;
+ assert(totalstep>=0);
+ for (i=0;i<totalstep;i++)
+ {
+ res[0] +=(tmp+i*stepsize)*b[setSize-1];
+ res[1] +=b[setSize-1];
+ }
+ }
+ }
+ return res[0]/res[1];
+}
+
+float
+FuzzyVariable::defuzzyCentroid(const int& a,const vector<float>& b) const
+{
+ assert((a==setSize)&&(a==b.len()));
+ //check whether b is zero vector
+ int sum=0;
+ for (int i=0;i<b.len();i++)
+ {
+ if (b[i]>0)
+ sum++;
+ }
+ if (sum==0)
+ {
+ cerr<<"413:try to defuzzy while no rule fired in defuzzyCentroid in variablf.cpp"<<endl;
+ exit(1);
+ }
+ float stepsize=(endPoint-startPoint)/100; //divide into 100 intervals
+ vector<int> vecint(setSize); //store setFireFlag
+ vector<float> vecfloat(setSize); //store membership value
+ float tmp,num,dem; //tmp: set value, num &dem for centroid
+ num=0.0;
+ dem=0.0;
+ for (i=0;i<100;i++)
+ {
+ tmp=startPoint+i*stepsize;
+ vecint=this->setFireFlag(tmp); //get setFireFlag
+ for (int j=0;j<setSize;j++)
+ {
+ vecfloat[j]=0;
+ if (vecint[j]==1) //this set fired?
+ {
+ vecfloat[j]=fuzzySet[j].memberFunction(tmp); //membership value
+ if (vecfloat[j]>b[j]) //compare two membership values
+ vecfloat[j]=b[j]; //minimum
+ }
+ }
+ int ind=vecfloat.maximum_index(); //maximum index
+ num +=tmp*(vecfloat[ind]);
+ dem +=1.0*(vecfloat[ind]);
+ }
+ return num/dem; //centroid
+}
+
+
+//operators
+FuzzyMember&
+FuzzyVariable::operator [] (int i) const
+{
+ assert(i>=0&&i<setSize);
+ return fuzzySet[i];
+}
+
+FuzzyVariable&
+FuzzyVariable::operator =(const FuzzyVariable& a)
+{
+ if ((&a)==this) return *this;
+ delete []fuzzySet;
+ delete []variableName;
+ setSize=a.setSize;
+ startPoint=a.startPoint;
+ endPoint=a.endPoint;
+ fuzzySet=new FuzzyMember[setSize];
+ assert(fuzzySet !=0);
+ for (int i=0;i<setSize;i++)
+ fuzzySet[i]=a.fuzzySet[i];
+ int length=strlen(a.variableName)+2;
+ variableName =new char[length];
+ assert(variableName !=0);
+ strncpy(variableName,a.variableName,length);
+ return *this;
+}
+
+//friend I/O
+ostream& operator << (ostream& os, const FuzzyVariable& a)
+{
+ os<<a.variableName<<"\t";
+ os<<a.setSize<<"\t";
+ os<<a.startPoint<<"\t";
+ os<<a.endPoint<<endl;
+ for (int i=0;i<a.setSize;i++)
+ os<<a.fuzzySet[i];
+ return os;
+}
+
+istream& operator >>(istream& is, FuzzyVariable& a)
+{
+ char str[256];
+ int tmpsetSize;
+ float tmpstart,tmpend;
+ is>>str>>tmpsetSize>>tmpstart>>tmpend;
+ FuzzyVariable tmpVariable(tmpsetSize,tmpstart,tmpend,str);
+ for (int i=0;i<tmpsetSize;i++)
+ is>>tmpVariable.fuzzySet[i];
+ a=tmpVariable;
+ return is;
+}
+
+
+
+
diff --git a/fu_ga_fs/variablf.h b/fu_ga_fs/variablf.h
new file mode 100644
index 0000000..ffe8ed6
--- /dev/null
+++ b/fu_ga_fs/variablf.h
@@ -0,0 +1,59 @@
+#ifndef __VARIABLF_H__
+#define __VARIABLF_H__
+
+#include "memberf.h"
+#include "vector.h"
+
+class FuzzyVariable
+{
+private:
+ int setSize;
+ float startPoint;
+ float endPoint;
+ char *variableName;
+ FuzzyMember *fuzzySet;
+
+public:
+ //constructors
+ FuzzyVariable():setSize(0),startPoint(0),endPoint(0),variableName(0),fuzzySet(0){}
+ FuzzyVariable(int a,float b,float c);
+ FuzzyVariable(int a,float b,float c,char *str);
+ FuzzyVariable(const FuzzyVariable& a);
+
+ //destructor
+ ~FuzzyVariable(){delete []fuzzySet;delete []variableName;}
+
+ //member functions
+ FuzzyVariable& standardVariable();
+ char* get_variableName() const {return variableName;}
+ int get_setSize() const {return setSize;}
+ float get_startPoint() const {return startPoint;}
+ float get_endPoint() const {return endPoint;}
+ FuzzyVariable& change_setSize(const int& a);
+ FuzzyVariable& change_startPoint(const float& a);
+ FuzzyVariable& change_endPoint(const float& a);
+ FuzzyVariable& change_variableName(char *str);
+ char* setMeaning(const int& a,const int& b) const; //a:setSize b:which set
+ vector<int> setFireFlag(const float& a) const ;
+ float output(const float& a,const int& b) const; //b:set being chosen, output of set b
+ float output_max(const float& a) const; //a:input value
+ float defuzzifyMax(const int& a,const vector<float>& b) const;
+ //return the value
+ int defuzzyMax_index(const int& a,const vector<float>& b) const;
+ //return the set index
+ float defuzzyCentroid_add(const int& a,const vector<float>& b) const;
+ float defuzzyCentroid(const int& a,const vector<float>& b) const;
+
+ //operators
+ FuzzyMember& operator [] (int i) const;
+ FuzzyVariable& operator =(const FuzzyVariable& a);
+
+ //friend operator I/O
+ friend istream& operator >> (istream& is,FuzzyVariable& a);
+ friend ostream& operator << (ostream& os,const FuzzyVariable& a);
+};
+
+#endif
+
+
+
diff --git a/fu_ga_fs/variance.cpp b/fu_ga_fs/variance.cpp
new file mode 100644
index 0000000..af4434e
--- /dev/null
+++ b/fu_ga_fs/variance.cpp
@@ -0,0 +1,20 @@
+#include <math.h>
+
+#include "vector.h"
+
+float
+variance(vector<float> a, float b)
+{
+ float sum=0.0;
+ for (int i=0;i<a.len();i++)
+ {
+ float tem=a[i]-b;
+ tem=tem*tem;
+ sum+=tem;
+ }
+ sum=sum/a.len();
+ sum=sqrt(sum);
+ return(sum);
+}
+
+
diff --git a/fu_ga_fs/vector.h b/fu_ga_fs/vector.h
new file mode 100644
index 0000000..98ea5ae
--- /dev/null
+++ b/fu_ga_fs/vector.h
@@ -0,0 +1,197 @@
+#ifndef __VECTOR_H__
+#define __VECTOR_H__
+
+#include "headfile.h"
+
+template <class Type>
+class vector
+{
+private:
+ int row; //length of array (column);
+ Type* arr; //pointer to the array;
+public:
+ //constructors
+ vector():row(0),arr(0) {}
+ vector(int a);
+ vector(const vector<Type>& a);
+ vector(int a,Type* b);
+ ~vector(){delete []arr;}
+
+ //operators
+ vector<Type>& operator =(const vector<Type>& a);
+ vector<Type>& operator +=(const vector<Type>& a);
+ int operator !=(const vector<Type>& a) const;
+ int operator <(const vector<Type>& a) const {return (row<a.row);}
+ Type& operator [] (int i) const {assert(i>=0&&i<row); return arr[i];}
+
+ //member functions
+ int len() const {return row;}
+ Type sum() const;
+ int maximum_index() const;
+ vector<Type>& changeSize(const int& a);
+ Type minimum() const;
+
+ friend vector<Type> operator |(const vector<Type>& a,const vector<Type>& b);
+ friend istream& operator >> (istream& is,vector<Type>& a);
+ friend ostream& operator << (ostream& os, const vector<Type>& a);
+};
+
+
+template <class Type>
+vector<Type>::vector(int a):
+row(a) {
+ arr=new Type [row];
+ assert(arr!=0);
+}
+
+template <class Type>
+vector<Type>::vector(int a,Type* b):
+ row(a)
+{
+ arr=new Type[row];
+ assert(arr !=0);
+ for (int i=0;i<row;i++)
+ arr[i]=b[i];
+}
+
+
+template <class Type>
+vector<Type>::vector(const vector<Type>& a):
+row(a.row) {
+ arr = new Type [row];
+ assert(arr!=0);
+ for (int i=0;i<row;i++)
+ arr[i]=a[i];
+}
+
+template <class Type>
+vector<Type>&
+vector<Type>::changeSize(const int& a)
+{
+ delete []arr;
+ row=a;
+ arr=new Type[row];
+ assert(arr!=0);
+ return *this;
+}
+
+
+template <class Type>
+vector<Type>&
+vector<Type>::operator =(const vector<Type>& a)
+{
+ if ((&a)==this) return *this;
+ delete []arr;
+ row=a.row;
+ arr = new Type [row];
+ assert(arr!=0);
+ for (int i=0;i<row;i++)
+ arr[i]=a[i];
+ return *this;
+}
+
+template <class Type>
+vector<Type>&
+vector<Type>::operator +=(const vector<Type>& a)
+{
+ assert(row==a.row);
+ for (int i=0;i<row;i++)
+ arr[i] +=a[i];
+ return *this;
+}
+
+template <class Type>
+int
+vector<Type>::operator !=(const vector<Type>& a) const
+{
+ if ((&a)==this) return 0;
+ if (row !=a.row) return 1;
+ for (int i=0;i<row;i++)
+ {
+ if (arr[i] !=a[i]) return 1;
+ }
+ return 0;
+}
+
+//member functions
+template <class Type>
+Type
+vector<Type>::sum() const
+{
+ Type tmp=arr[0];
+ for (int i=1;i<row;i++)
+ tmp +=arr[i];
+ return tmp;
+}
+
+template <class Type>
+int
+vector<Type>::maximum_index() const
+{
+ Type max=arr[0];
+ int ind=0;
+ for (int i=1;i<row;i++)
+ {
+ if (max<arr[i])
+ {
+ max=arr[i];
+ ind=i;
+ }
+ }
+ return ind;
+}
+
+template <class Type>
+Type
+vector<Type>:: minimum() const
+{
+ Type mini=arr[0];
+ for (int i=1;i<row;i++)
+ {
+ if (arr[i]<mini)
+ mini=arr[i];
+ }
+ return mini;
+}
+
+
+//friend operators
+template <class Type>
+vector<Type> operator | (const vector<Type>& a,const vector<Type>& b)
+{
+ vector<Type> newVec(a.row+b.row);
+ for (int i=0;i<a.row;i++)
+ newVec[i]=a[i];
+ for (i=0;i<b.row;i++)
+ newVec[a.row+i]=b[i];
+ return newVec;
+}
+
+template <class Type>
+istream& operator >> (istream& is,vector<Type>& a)
+{
+ for (int i=0;i<a.row;i++)
+ is >> a[i];
+ return is;
+}
+
+template <class Type>
+ostream& operator << (ostream& os,const vector<Type>& a)
+{
+ int sum=0;
+ for (int j=0;j<a.row;j++)
+ {
+ os << a[j]<<"\t";
+ sum++;
+ if ((sum%8)==0)
+ {
+ os <<endl;
+ sum=0;
+ }
+ }
+ os<<endl;
+ return os;
+}
+#endif // _vector_h_
+
+
|
btbytes/ci
|
7c1477502c17c612acc36b14297c7f7f3046ab36
|
removing obsolete header file ref
|
diff --git a/fs/headfile.h b/fs/headfile.h
index f850d46..4e98e11 100644
--- a/fs/headfile.h
+++ b/fs/headfile.h
@@ -1,11 +1,8 @@
-#include <iostream.h>
+#include <iostream>
#include <fstream.h>
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
-#include <conio.h>
-#include <dos.h>
-
|
btbytes/ci
|
581435765de3faf641edfa273253721a1ef4bf62
|
adding fuzzy system
|
diff --git a/fs/Makefile b/fs/Makefile
new file mode 100644
index 0000000..63aac62
--- /dev/null
+++ b/fs/Makefile
@@ -0,0 +1,2 @@
+all: fuzzyrul.h headfile.h memberf.h mystring.h ruleset.h variablf.h vector.h
+ gcc -Wall -lm fl.cpp fuzzyrul.cpp main.cpp memberf.cpp mystring.cpp ruleset.cpp variablf.cpp -o fs
diff --git a/fs/fl.cpp b/fs/fl.cpp
new file mode 100644
index 0000000..26d13cd
--- /dev/null
+++ b/fs/fl.cpp
@@ -0,0 +1,189 @@
+#include <fstream.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "memberf.h"
+#include "variablf.h"
+#include "vector.h"
+#include "fuzzyrul.h"
+#include "ruleset.h"
+
+#define max(a,b) ((a-b)>0)?a:b
+
+#define NAME_MAX 80
+
+char dataFileName[NAME_MAX], ruleName[NAME_MAX];
+char resultFileName[NAME_MAX],ruleInName[NAME_MAX];
+int fuzzyFlag; //1: average 0:minimum
+int defuzzyFlag; //0: maximum, 1: centroid without overlap, 2: with overlap
+int ruleEffectFlag; //1:maximum 0:add output values from each rule
+
+FuzzyRuleSet ruleSet;
+
+static void read_fl_runfile(char *dataFile); // read fuzzy system run file
+static void read_fl_rulefile(void); // read fuzzy system rule file
+static void write_fl_rules(void); // output fuzzy rules in verbal to a file
+
+// Listing 8.16 Implementation of fl()
+void fl (char *dataFile)
+{
+ read_fl_runfile(dataFile);
+ read_fl_rulefile();
+ write_fl_rules();
+
+ ifstream dFile;
+ dFile.open(dataFileName,ios::in);
+ if (!dFile)
+ {
+ cerr<<"cann't open file "<<dataFileName<<" for input"<<endl;
+ exit(1);
+ }
+
+ int indim,outdim; //input dim and output dim
+ dFile>>indim>>outdim;
+ vector<float> invec(indim);
+ vector<int> outvec(outdim);
+ vector<int> classN(outdim); //store class no. for each output
+ dFile>>classN;
+
+ int outVarDim=ruleSet[0].get_outputSize();
+ if (outdim !=outVarDim)
+ {
+ cout<<"dim of data outputs isn't equal to dim of output variables in rules"<<endl;
+ exit(1);
+ }
+
+ ofstream rFile;
+ rFile.open(resultFileName,ios::out);
+ if (!rFile)
+ {
+ cerr<<"cann't open file " <<resultFileName<< " for output\n"<<endl;
+ exit(1);
+ }
+
+ rFile<<"index\t"<<"Wrong?\t"<<"Target\t"<<"Obtained"<<endl;
+
+ int in_order=0;
+ int misclassify=0;
+ vector<int> cla(outVarDim);
+ vector<float> tmp(outVarDim);
+
+ while (dFile>>invec)
+ {
+ dFile>>outvec;
+ in_order++;
+ rFile<<in_order<<"\t";
+ if (ruleSet.checkRuleSetFired(invec)==1)
+ {
+ tmp=ruleSet.output(invec,ruleEffectFlag,fuzzyFlag,defuzzyFlag);
+
+ //get output class
+ for (int idx=0;idx<outVarDim;idx++)
+ cla[idx]=(int)(tmp[idx]*classN[idx]);
+
+ //output data dim equal to outputVariable dim
+ if (cla !=outvec)
+ {
+ rFile<<"wrong\t";
+ misclassify++;
+ }
+ else
+ rFile<<"\t";
+
+ rFile<<(outvec|cla);
+ }
+ else
+ {
+ rFile<<"rule set not fired"<<endl;
+ misclassify++;
+ }
+ }
+ dFile.close();
+
+ rFile<<"total misclassification is :"<<misclassify<<endl;
+ rFile.close();
+}
+
+// Listing 8.17 Implementation of read_fl_runfile()
+static void read_fl_runfile (char *dataFile)
+{
+ int true;
+ char Msg[NAME_MAX];
+ strcpy(Msg,"edit ");
+ strcat(Msg,dataFile);
+
+ ifstream runFile;
+ do
+ {
+ runFile.open(dataFile,ios::in);
+ if (!runFile)
+ {
+ cerr<<"cann't open file "<<dataFile<<" for input"<<endl;
+ exit(1);
+ }
+ runFile>>ruleInName>>dataFileName>>ruleName>>resultFileName;
+ runFile>>fuzzyFlag>>defuzzyFlag>>ruleEffectFlag;
+ runFile.close();
+ cout<<ruleInName<<endl;
+ cout<<dataFileName<<endl;
+ cout<<ruleName<<endl;
+ cout<<resultFileName<<endl;
+ cout<<fuzzyFlag<<" 0:minimum 1:average"<<endl;
+ cout<<defuzzyFlag<<" 0:maximum 1:without overlap 2:with overlap"<<endl;
+ cout<<ruleEffectFlag<<" 1: maximum of output values from each rule 0:add"<<endl;
+ cout<<"(C)ontinue, (Q)uit, (M)odify runfile ";
+ char condition;
+ cin>>condition;
+ switch(condition)
+ {
+ case 'c': true=0;
+ break;
+ case 'C': true=0;
+ break;
+ case 'q': exit(1);
+ case 'Q': exit(1);
+ case 'm': true=1;
+ system(Msg);
+ break;
+ case 'M': true=1;
+ system(Msg);
+ break;
+ default:
+ true=1;
+ break;
+ }
+ } while (true==1);
+}
+
+// Listing 8.18 Implementation of read_fl_rulefile()
+static void read_fl_rulefile (void)
+{
+ // FuzzyRule
+ ifstream iFile;
+ iFile.open(ruleInName,ios::in);
+ if (!iFile)
+ {
+ cerr<<"cann't open file "<<ruleInName<<" for input"<<endl;
+ exit(1);
+ }
+
+ iFile>>ruleSet;
+ iFile.close();
+
+}
+
+// Listing 8.19 Implementation of write_fl_rules()
+static void write_fl_rules (void)
+{
+ //output formed rules
+ ofstream oFile;
+ oFile.open(ruleName,ios::out);
+ if (!oFile)
+ {
+ cerr<<"cann't open file "<<ruleName<<" for output"<<endl;
+ exit(1);
+ }
+ for (int i=0;i<ruleSet.get_ruleSetSize();i++)
+ oFile<<i<<"th rule: "<<ruleSet[i].get_ruleContent()<<endl;
+ oFile.close();
+}
diff --git a/fs/fuzzyrul.cpp b/fs/fuzzyrul.cpp
new file mode 100644
index 0000000..9a2b074
--- /dev/null
+++ b/fs/fuzzyrul.cpp
@@ -0,0 +1,419 @@
+#include "headfile.h"
+#include "fuzzyrul.h"
+
+//constructors
+FuzzyRule::FuzzyRule(int a,int b,vector<int> c,vector<int> d):
+variableSize(a),outputSize(b)
+{
+ if ((c.len()!=variableSize)||(d.len()!=outputSize))
+ {
+ fprintf(stderr,"input(or output)Vector dimension doesn't equal rule's input(output) dimension");
+ exit(1);
+ }
+ inputSetFlag=c;
+ outputSetFlag=d;
+ inputVariable= new FuzzyVariable[variableSize];
+ assert(inputVariable !=0);
+ outputVariable= new FuzzyVariable[outputSize];
+ assert(outputVariable !=0);
+ ruleContent=new char[256];
+ assert(ruleContent !=0);
+ strcpy(ruleContent,"rule_not_formed_yet");
+}
+
+FuzzyRule::FuzzyRule(int a,int b,vector<int> c,vector<int> d,char* str):
+variableSize(a),outputSize(b)
+{
+ if ((c.len()!=variableSize)||(d.len()!=outputSize))
+ {
+ fprintf(stderr,"input(or output)Vector dimension doesn't equal rule's input(output) dimension");
+ exit(1);
+ }
+ inputSetFlag=c;
+ outputSetFlag=d;
+ inputVariable= new FuzzyVariable[variableSize];
+ assert(inputVariable !=0);
+ outputVariable= new FuzzyVariable[outputSize];
+ assert(outputVariable !=0);
+ int length=strlen(str)+2;
+ ruleContent=new char[length];
+ assert(ruleContent !=0);
+ strncpy(ruleContent,str,length);
+}
+
+FuzzyRule::FuzzyRule(const FuzzyRule& a):
+ variableSize(a.variableSize),outputSize(a.outputSize)
+{
+ inputSetFlag=a.inputSetFlag;
+ outputSetFlag=a.outputSetFlag;
+ inputVariable= new FuzzyVariable[variableSize];
+ assert(inputVariable !=0);
+ for (int i=0;i<variableSize;i++)
+ inputVariable[i]=a.inputVariable[i];
+ outputVariable= new FuzzyVariable[outputSize];
+ assert(outputVariable !=0);
+ for (i=0;i<outputSize;i++)
+ outputVariable[i]=a.outputVariable[i];
+ delete []ruleContent;
+ int length=strlen(a.ruleContent)+2;
+ ruleContent= new char[length];
+ assert(ruleContent !=0);
+ strncpy(ruleContent,a.ruleContent,length);
+}
+
+//member functions
+FuzzyRule&
+FuzzyRule::change_inputSetFlag(const vector<int>& a)
+{
+ //check whether have the correct dimension
+ if (variableSize !=a.len())
+ {
+ fprintf(stderr,"assign inputSetFlag with a different dimension");
+ exit(1);
+ }
+
+ inputSetFlag=a;
+ return *this;
+}
+
+FuzzyRule&
+FuzzyRule::change_outputSetFlag(const vector<int>& a)
+{
+ //check whether have the correct dimension
+ if (outputSize !=a.len())
+ {
+ fprintf(stderr,"assign outputSetFlag with a different dimension");
+ exit(1);
+ }
+
+ outputSetFlag=a;
+ return *this;
+}
+
+FuzzyRule&
+FuzzyRule::change_variableSize(const int& a)
+{
+ variableSize=a;
+ return *this;
+}
+
+FuzzyRule&
+FuzzyRule::change_outputSize(const int& a)
+{
+ outputSize=a;
+ return *this;
+}
+
+FuzzyRule&
+FuzzyRule::change_ruleContent(char *str)
+{
+ delete []ruleContent;
+ int length=strlen(str)+2;
+ ruleContent= new char[length];
+ assert(ruleContent !=0);
+ strncpy(ruleContent,str,length);
+ return *this;
+}
+
+FuzzyRule&
+FuzzyRule::form_ruleContent()
+{
+ delete []ruleContent;
+
+ char str[256];
+ strcpy(str,"if_");
+
+ int inflag=0,outflag=0;
+ int intmp=0,outtmp=0;
+ for (int i=0;i<variableSize;i++)
+ {
+ if (inputSetFlag[i]==0)
+ inflag++;
+ else
+ {
+ if (intmp>0)
+ strcat(str,"_and_");
+ intmp++;
+
+ strcat(str,this->inputVariable[i].get_variableName());
+ strcat(str,"_is_");
+ if (inputSetFlag[i]<0)
+ strcat(str,"Not");
+ strcat(str,this->inputVariable[i].setMeaning(this->inputVariable[i].get_setSize(),abs(inputSetFlag[i])-1));
+
+
+ }
+ }
+
+ strcat(str,"_then_");
+ for (i=0;i<outputSize;i++)
+ {
+ if (outputSetFlag[i]==0)
+ outflag++;
+ else
+ {
+ if (outtmp>0)
+ strcat(str,"_and_");
+ outtmp++;
+
+ strcat(str,this->outputVariable[i].get_variableName());
+ strcat(str,"_is_");
+ if (outputSetFlag[i]<0)
+ strcat(str,"Not");
+ strcat(str,this->outputVariable[i].setMeaning(this->outputVariable[i].get_setSize(),abs(outputSetFlag[i])-1));
+
+ }
+ }
+
+ if ((inflag==variableSize)||(outflag==outputSize))
+ {
+ ruleContent=new char[60];
+ assert(ruleContent !=0);
+ strcpy(ruleContent,"This_rule_doesn't_exist");
+ }
+ else
+ {
+ int length=strlen(str)+2;
+ ruleContent=new char[length];
+ assert(ruleContent !=0);
+ strncpy(ruleContent,str,length);
+ }
+ return *this;
+}
+
+FuzzyRule&
+FuzzyRule::change_outputVariable(const FuzzyVariable& a,const int& b)
+{
+ assert(b>=0&&b<outputSize);
+ outputVariable[b]=a;
+ return *this;
+}
+
+FuzzyVariable& //need & ?
+FuzzyRule::get_outputVariable(const int&a) const
+{
+ assert(a>=0&&a<outputSize);
+ return outputVariable[a];
+}
+
+// Listing 8.8 Implementation of method checkRuleActive() in class FuzzyRule
+int
+FuzzyRule::checkRuleActive(const vector<float>& a) const
+{//check whether this has been activated
+
+ assert(a.len()==variableSize);
+
+ vector<int>* vec;
+ vec= new vector<int>[variableSize];
+
+ int sum=0;
+ for (int i=0;i<variableSize;i++)
+ {
+ if (inputSetFlag[i]==0)
+ sum++;
+ else
+ {
+ vec[i]=inputVariable[i].setFireFlag(a[i]);
+ int ind=abs(inputSetFlag[i])-1;
+ if (vec[i][ind]==1)
+ sum++;
+ }
+ }
+
+ delete []vec;
+ if (sum==variableSize)
+ return 1;
+ else
+ return 0;
+}
+
+// Listing 8.9 Implementation of method FuzzyOutput() in class FuzzyRule
+vector<float>
+FuzzyRule::FuzzyOutput(const vector<float>& a) const
+{
+ //check the input dimension
+ assert(a.len()==variableSize);
+
+ //check whethe the rule is activated
+ if (checkRuleActive(a) !=1)
+ {
+ fprintf(stderr,"try to use unactivated rule\n");
+ exit(1);
+ }
+
+ float min=1.0,tmp;
+
+ for (int i=0;i<variableSize;i++)
+ {
+ if (inputSetFlag[i]!=0)
+ {
+ tmp=inputVariable[i].output(a[i],inputSetFlag[i]);
+ if (min>tmp)
+ min=tmp; //get the minimum value
+ }
+ }
+
+ vector<float> tmpout(outputSize);
+ for (i=0;i<outputSize;i++)
+ {
+ if (outputSetFlag[i] ==0)
+ tmpout[i]=0.0;
+ else
+ {
+ if (outputSetFlag[i]>0)
+ tmpout[i]=min;
+ else
+ {
+ if (min>=0.9999)
+ tmpout[i]=0.0001;
+ else
+ tmpout[i]=1-min;
+ }
+ }
+ }
+
+ return tmpout;
+}
+
+vector<float>
+FuzzyRule::FuzzyOutput_average(const vector<float>& a) const
+{
+ //check the input dimension
+ assert(a.len()==variableSize);
+
+ //check whethe the rule is activated
+ if (checkRuleActive(a) !=1)
+ {
+ fprintf(stderr,"try to use unactivated rule\n");
+ exit(1);
+ }
+
+ float ave=0.0;
+ int tmp=0;
+ for (int i=0;i<variableSize;i++)
+ {
+ if (inputSetFlag[i]!=0)
+ {
+ ave +=inputVariable[i].output(a[i],inputSetFlag[i]);
+ tmp++;
+ }
+ }
+ ave =ave/tmp;
+
+ vector<float> tmpout(outputSize);
+ for (i=0;i<outputSize;i++)
+ {
+ if (outputSetFlag[i] ==0)
+ tmpout[i]=0.0;
+ else
+ {
+ if (outputSetFlag[i]>0)
+ tmpout[i]=ave;
+ else
+ {
+ if (ave>=0.9999)
+ tmpout[i]=0.0001;
+ else
+ tmpout[i]=1-ave;
+ }
+ }
+ }
+
+ return tmpout;
+}
+
+vector<int>
+FuzzyRule::formRange(const int& a) const
+{
+ int tmp=variableSize+outputSize+1;
+ vector<int> range(tmp);
+ range[0]=a; //how many rules
+ for (int i=0;i<variableSize;i++)
+ range[i+1]=inputVariable[i].get_setSize();
+ for (i=0;i<outputSize;i++)
+ range[variableSize+i+1]=outputVariable[i].get_setSize();
+
+ return range;
+}
+
+
+//operators
+FuzzyVariable&
+FuzzyRule::operator [] (int i) const
+{
+ assert(i>=0&&i<variableSize);
+ return inputVariable[i];
+}
+
+FuzzyRule&
+FuzzyRule::operator =(const FuzzyRule& a)
+{
+ if ((&a)==this) return *this;
+
+ delete []inputVariable;
+ delete []outputVariable;
+ delete []ruleContent;
+ variableSize=a.variableSize;
+ outputSize=a.outputSize;
+ inputSetFlag=a.inputSetFlag;
+ outputSetFlag=a.outputSetFlag;
+ inputVariable=new FuzzyVariable[variableSize];
+ assert(inputVariable !=0);
+ for (int i=0;i<variableSize;i++)
+ inputVariable[i]=a.inputVariable[i];
+ outputVariable=new FuzzyVariable[outputSize];
+ assert(outputVariable !=0);
+ for (i=0;i<outputSize;i++)
+ outputVariable[i]=a.outputVariable[i];
+ int length=strlen(a.ruleContent)+2;
+ ruleContent=new char[length];
+ assert(ruleContent !=0);
+ strncpy(ruleContent,a.ruleContent,length);
+
+ return *this;
+}
+
+
+
+//I/O operators
+ostream& operator <<(ostream& os,const FuzzyRule& a)
+{
+ os<<a.variableSize<<"\t";
+ os<<a.outputSize<<endl;
+
+ for (int i=0;i<a.variableSize;i++)
+ os<<a[i];
+
+ for (i=0;i<a.outputSize;i++)
+ os<<a.get_outputVariable(i);
+
+ vector<int> intvec=a.inputSetFlag|a.outputSetFlag;
+ os<<intvec;
+
+ return os;
+}
+
+istream& operator >>(istream& is, FuzzyRule& a)
+{
+ int tmpVariableSize, tmpOutputSize;
+ is>>tmpVariableSize>>tmpOutputSize;
+ vector<int> tmpInputSetFlag(tmpVariableSize);
+ vector<int> tmpOutputSetFlag(tmpOutputSize);
+
+ FuzzyRule tmpRule(tmpVariableSize,tmpOutputSize,tmpInputSetFlag,tmpOutputSetFlag);
+ for (int i=0;i<tmpVariableSize;i++)
+ is>>tmpRule.inputVariable[i];
+ for (i=0;i<tmpOutputSize;i++)
+ is>>tmpRule.outputVariable[i];
+
+ is>>tmpInputSetFlag>>tmpOutputSetFlag;
+
+ tmpRule.change_inputSetFlag(tmpInputSetFlag);
+ tmpRule.change_outputSetFlag(tmpOutputSetFlag);
+ tmpRule.form_ruleContent();
+ a=tmpRule;
+
+ return is;
+}
+
+
diff --git a/fs/fuzzyrul.h b/fs/fuzzyrul.h
new file mode 100644
index 0000000..b896312
--- /dev/null
+++ b/fs/fuzzyrul.h
@@ -0,0 +1,74 @@
+#ifndef __FUZZYRUL_H__
+#define __FUZZYRUL_H__
+
+#include "variablf.h"
+#include "memberf.h"
+#include "vector.h"
+
+// Listing 8.7 Definition of class FuzzyRule
+class FuzzyRule
+{
+private:
+ int variableSize; //number of variables in a rule
+ int outputSize; //number of output in a rule
+ vector<int> inputSetFlag; //vector tell which set is activated for each variable
+ vector<int> outputSetFlag; //vector tell which set is activated for each variable;
+
+ FuzzyVariable *inputVariable; //pointers to the input variables
+ FuzzyVariable *outputVariable; //pointers to the output variables
+ char *ruleContent;
+
+public:
+ FuzzyRule():variableSize(0),outputSize(0),ruleContent(0),inputVariable(0),outputVariable(0)
+ {
+ vector<int> vec;
+ inputSetFlag=vec;
+ outputSetFlag=vec;
+ }
+ FuzzyRule(int a,int b,vector<int> c,vector<int> d);
+ FuzzyRule(int a,int b,vector<int> c,vector<int> d,char* str);
+
+ FuzzyRule(const FuzzyRule& a);
+
+ ~FuzzyRule(){delete []ruleContent; delete []inputVariable;delete []outputVariable;}
+
+ //member functions
+ int get_variableSize() const {return variableSize;}
+ int get_outputSize() const {return outputSize;}
+ vector<int> get_inputSetFlag()const {return inputSetFlag;}
+ vector<int> get_outputSetFlag() const {return outputSetFlag;}
+ char* get_ruleContent() const {return ruleContent;}
+
+ FuzzyRule& change_inputSetFlag(const vector<int>& a);
+ FuzzyRule& change_outputSetFlag(const vector<int>& a);
+ FuzzyRule& change_variableSize(const int& a);
+ FuzzyRule& change_outputSize(const int& a);
+ FuzzyRule& change_ruleContent(char* str);
+ FuzzyRule& form_ruleContent();
+
+ FuzzyRule& change_outputVariable(const FuzzyVariable& a,const int& b);
+ //bth outputVariable change to a
+
+ int checkRuleActive(const vector<float>& a) const;
+ //check this rule is activated via input a or not
+
+ vector<float> FuzzyOutput(const vector<float>& a) const;
+ //calculate the fuzzy output vector
+ vector<float> FuzzyOutput_average(const vector<float>& a) const;
+
+ FuzzyVariable& get_outputVariable(const int& a) const;
+ //need & ?
+ vector<int> formRange(const int& a) const;
+ //a: maximum rules; get possible maximum fuzzy set no. for each variable
+
+ //operator
+ FuzzyVariable& operator [] (int i) const;
+ FuzzyRule& operator =(const FuzzyRule& a);
+
+ //I/O operators
+ friend istream& operator >>(istream& is, FuzzyRule& a);
+ friend ostream& operator <<(ostream& os,const FuzzyRule& a);
+
+};
+#endif
+
diff --git a/fs/headfile.h b/fs/headfile.h
new file mode 100644
index 0000000..f850d46
--- /dev/null
+++ b/fs/headfile.h
@@ -0,0 +1,11 @@
+#include <iostream.h>
+#include <fstream.h>
+#include <assert.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <time.h>
+#include <conio.h>
+#include <dos.h>
+
diff --git a/fs/main.cpp b/fs/main.cpp
new file mode 100644
index 0000000..9238b4c
--- /dev/null
+++ b/fs/main.cpp
@@ -0,0 +1,15 @@
+#include "headfile.h"
+
+// Listing 8.15 Implementation of main()
+void
+main(int argc,char *argv[])
+{
+ extern void fl(char *);
+
+ if (argc !=2)
+ {
+ fprintf(stderr,"usuage: fl run_file_name\n");
+ exit(1);
+ }
+ fl(argv[1]);
+ }
diff --git a/fs/memberf.cpp b/fs/memberf.cpp
new file mode 100644
index 0000000..02a2c6d
--- /dev/null
+++ b/fs/memberf.cpp
@@ -0,0 +1,486 @@
+#include "headfile.h"
+
+#include "memberf.h"
+
+float LeftTriangle(float,float,float);
+float RightTriangle(float,float,float);
+float Triangle(float,float,float);
+float Sigmoid(float,float,float);
+float reverseSigmoid(float,float,float);
+float Gaussian(float,float,float);
+
+vector<float> antiLeftTriangle(float,float,float);
+vector<float> antiRightTriangle(float,float,float);
+vector<float> antiTriangle(float,float,float);
+vector<float> antiSigmoid(float,float,float);
+vector<float> antiReverseSigmoid(float,float,float);
+vector<float> antiGaussian(float,float,float);
+
+
+//operators
+
+FuzzyMember&
+FuzzyMember::operator =(const FuzzyMember& a)
+{
+ if ((&a)==this) return *this;
+
+ delete []functionType;
+ startPoint=a.startPoint;
+ endPoint=a.endPoint;
+
+ int length=strlen(a.functionType)+2;
+ functionType= new char[length];
+ assert(functionType !=0);
+ strncpy(functionType,a.functionType,length);
+
+ return *this;
+}
+
+// Listing 8.5 Defintion of public operator == in class FuzzyMember
+int
+FuzzyMember::operator ==(const FuzzyMember& a) const
+{
+ int tmp=1;
+ if ((&a)==this) return 1;
+
+ MyString str1(functionType);
+ MyString str2(a.functionType);
+ if (str1==str2)
+ {
+ if (startPoint !=a.startPoint)
+ tmp=0;
+ if (endPoint !=a.endPoint)
+ tmp=0;
+ }
+ else
+ return 0;
+
+ return tmp;
+}
+
+int
+FuzzyMember::operator <(const FuzzyMember& a) const
+{
+ int sum=0;
+ if (startPoint<a.startPoint) sum++;
+ if (endPoint<a.endPoint) sum++;
+ if ((startPoint<=a.startPoint)&&(endPoint<=a.endPoint)&&(sum>=1)) return 1;
+ else return 0;
+}
+
+int
+FuzzyMember::operator >(const FuzzyMember& a) const
+{
+ int sum=0;
+ if (endPoint>a.endPoint) sum++;
+ if (startPoint>a.startPoint) sum++;
+ if ((endPoint>=a.endPoint)&&(startPoint>=a.startPoint)&&sum>=1) return 1;
+ else return 0;
+}
+
+
+//member functions
+FuzzyMember&
+FuzzyMember::change_member(const float& a,const float& b, char *str)
+{
+ startPoint=a;
+ endPoint=b;
+ delete []functionType;
+ int length=strlen(str)+2;
+ functionType = new char[length];
+
+ assert(functionType !=0);
+ strncpy(functionType,str,length);
+
+ return *this;
+}
+
+int
+FuzzyMember::member_flag(const float& a) const
+{
+ int tmp=0;
+
+ if (strncmpi(functionType,"leftTriangle",strlen(functionType)-1)==0)
+ {
+ if (a<endPoint)
+ tmp=1;
+ }
+ else if (strncmpi(functionType,"rightTriangle",strlen(functionType)-1)==0)
+ {
+ if (a>startPoint)
+ tmp=1;
+ }
+ else if (strncmpi(functionType,"Triangle",strlen(functionType)-1)==0)
+ { //Triangle
+ if ((a>startPoint)&&(a<endPoint))
+ tmp=1;
+ }
+ else
+ { //Sigmoid and reverSigmoid and Gaussian
+ tmp=1;
+ }
+
+ return tmp;
+}
+
+int
+FuzzyMember::setTypeFlag() const
+{
+ int tmp;
+
+ if (strncmpi(functionType,"leftTriangle",strlen(functionType))==0)
+ tmp=1;
+ else if (strncmpi(functionType,"rightTriangle",strlen(functionType))==0)
+ tmp=2;
+ else if (strncmpi(functionType,"Triangle",strlen(functionType))==0)
+ tmp=3;
+ else if (strncmpi(functionType,"Sigmoid",strlen(functionType))==0)
+ tmp=4;
+ else if (strncmpi(functionType,"reverseSigmoid",strlen(functionType))==0)
+ tmp=5;
+ else if (strncmpi(functionType,"Gaussian",strlen(functionType))==0)
+ tmp=6;
+ else
+ tmp=0;
+
+ return tmp;
+}
+
+// Listing 8.4 Implementation of method memberFunction()
+float
+FuzzyMember::memberFunction(const float& a) const
+{
+ float tmp;
+
+ switch(this->setTypeFlag())
+ {
+ case 1:
+ tmp=LeftTriangle(a,startPoint,endPoint);
+ break;
+ case 2:
+ tmp=RightTriangle(a,startPoint,endPoint);
+ break;
+ case 3:
+ tmp=Triangle(a,startPoint,endPoint);
+ break;
+ case 4:
+ tmp=Sigmoid(a,startPoint,endPoint);
+ break;
+ case 5:
+ tmp=reverseSigmoid(a,startPoint,endPoint);
+ break;
+ case 6:
+ tmp=Gaussian(a,startPoint,endPoint);
+ break;
+ default:
+ cout<<"unknown fuzzySet type"<<endl;
+ exit(1);
+
+ }
+ return tmp;
+}
+
+float
+FuzzyMember::not(const float& a) const
+{
+ float tmp;
+ tmp=this->memberFunction(a);
+ if (tmp>=0.9999)
+ tmp=0.0001;
+ else
+ tmp=1-tmp;
+
+ return tmp;
+}
+
+
+vector<float>
+FuzzyMember::membership2input(const float& a) const
+{
+ vector<float> tmp(2);
+
+ switch(this->setTypeFlag())
+ {
+ case 1:
+ tmp=antiLeftTriangle(a,startPoint,endPoint);
+ break;
+ case 2:
+ tmp=antiRightTriangle(a,startPoint,endPoint);
+ break;
+ case 3:
+ tmp=antiTriangle(a,startPoint,endPoint);
+ break;
+ case 4:
+ tmp=antiSigmoid(a,startPoint,endPoint);
+ break;
+ case 5:
+ tmp=antiReverseSigmoid(a,startPoint,endPoint);
+ break;
+ case 6:
+ tmp=antiGaussian(a,startPoint,endPoint);
+ break;
+ default:
+ cout<<"unknown fuzzySet type"<<endl;
+ exit(1);
+ }
+
+ return tmp;
+}
+
+vector<float>
+FuzzyMember::centroid(const float& a,const float& b) const
+{
+ assert(b !=0);
+ vector<float> res(2);
+ res[0]=0;
+ res[1]=0;
+ int total=(int)((endPoint-startPoint)/b);
+ for (int i=0;i<total;i++)
+ {
+ float tmp1=startPoint+i*b;
+ float tmp2=memberFunction(tmp1);
+ if (tmp2>a)
+ tmp2=a;
+ res[0] +=tmp2*tmp1;
+ res[1] +=tmp2;
+ }
+
+ return res;
+}
+
+
+
+//friend I/O operators
+
+istream& operator >> (istream& is, FuzzyMember& a)
+{
+ char tmp[256];
+ float b1,b2;
+ is>>tmp>>b1>>b2;
+ a.change_member(b1,b2,tmp);
+
+ return is;
+}
+
+ostream& operator << (ostream& os, const FuzzyMember& a)
+{
+ os<<"\t"<<a.functionType<<"\t"<<a.startPoint<<"\t"<<a.endPoint;
+ os<<endl;
+ return os;
+}
+
+
+//local auxiliary function
+
+float
+LeftTriangle(float a, float firstPoint,float secondPoint)
+{
+ float tmp;
+ if (a>=secondPoint)
+ {
+ fprintf(stderr," %f is outside the LeftTraingle rangle(%f,%f)",a,firstPoint,secondPoint);
+ exit(1);
+ }
+ if (a<=firstPoint)
+ tmp=1.0;
+ else
+ tmp=(secondPoint-a)/(secondPoint-firstPoint);
+
+ return tmp;
+}
+
+float
+RightTriangle(float a, float firstPoint,float secondPoint)
+{
+ float tmp;
+
+ if (a<=firstPoint)
+ {
+ fprintf(stderr," %f is outside the rightTraingle rangle(%f,%f)",a,firstPoint,secondPoint);
+ exit(1);
+ }
+ if (a>=secondPoint)
+ tmp=1.0;
+ else
+ tmp=(a-firstPoint)/(secondPoint-firstPoint);
+ return tmp;
+}
+
+float
+Triangle(float a, float firstPoint,float secondPoint)
+{
+ float tmp,med;
+
+ if ((a<=firstPoint)||(a>=secondPoint))
+ {
+ fprintf(stderr," %f is outside the Traingle rangle(%f,%f)",a,firstPoint,secondPoint);
+ exit(1);
+ }
+
+ med=(secondPoint-firstPoint)/2;
+ if (med==0)
+ {
+ fprintf(stderr,"Triangle is a line, range (%f,%f)",firstPoint,secondPoint);
+ exit(1);
+ }
+ if (a<=(med+firstPoint))
+ tmp=(a-firstPoint)/med;
+ else
+ tmp=(secondPoint-a)/med;
+ return tmp;
+}
+
+float
+Sigmoid(float a, float firstPoint,float secondPoint)
+{
+ float tmp=((a-firstPoint)*12)/(secondPoint-firstPoint);
+ float result=1.0/(1.0+exp(-tmp+6));
+ if (result<0.00001)
+ result=0.00001;
+ return result;
+}
+
+float
+reverseSigmoid(float a, float firstPoint,float secondPoint)
+{
+ float result=1-Sigmoid(a,firstPoint,secondPoint);
+ if (result<0.00001)
+ result=0.00001;
+ return result;
+}
+
+float
+Gaussian(float a,float firstPoint,float secondPoint)
+{
+ float tmp=((a-firstPoint)*8.0)/(secondPoint-firstPoint) -4;
+ float result=exp(-0.5*tmp*tmp);
+ if (result<0.00001)
+ result=0.00001;
+ return result;
+}
+
+vector<float>
+antiLeftTriangle(float a, float firstPoint,float secondPoint)
+{
+ vector<float> tmp(2);
+ if (a>1||a<0)
+ {
+ fprintf(stderr," %f is outside the LeftTraingle value[0,1]",a);
+ exit(1);
+ }
+ if (a==0) {tmp[0]=tmp[1]=secondPoint; return tmp;}
+ if (a==1) {tmp[0]=tmp[1]=firstPoint; return tmp;}
+
+ tmp[0]=secondPoint-(a*(secondPoint-firstPoint));
+ tmp[1]=tmp[0];
+
+ return tmp;
+}
+
+vector<float>
+antiRightTriangle(float a, float firstPoint,float secondPoint)
+{
+ vector<float> tmp(2);
+ if (a>1||a<0)
+ {
+ fprintf(stderr," %f is outside the LeftTraingle value[0,1]",a);
+ exit(1);
+ }
+ if (a==0) {tmp[0]=tmp[1]=firstPoint; return tmp;}
+ if (a==1) {tmp[0]=tmp[1]=secondPoint; return tmp;}
+ tmp[0]=a*(secondPoint-firstPoint) + firstPoint;
+ tmp[1]=tmp[0];
+
+ return tmp;
+}
+
+vector<float>
+antiTriangle(float a, float firstPoint,float secondPoint)
+{
+ vector<float> tmp(2);
+ if (a>1||a<0)
+ {
+ fprintf(stderr," %f is outside the antiTraingle value[0,1]",a);
+ exit(1);
+ }
+ if (a==0) {tmp[0]=firstPoint;tmp[1]=secondPoint; return tmp;}
+ if (a==1) {tmp[0]=tmp[1]=0.5*(secondPoint-firstPoint)+firstPoint; return tmp;}
+
+ float med=(secondPoint-firstPoint)/2;
+
+ tmp[0]=a*med+firstPoint;
+ tmp[1]=secondPoint-a*med;
+
+ return tmp;
+}
+
+vector<float>
+antiSigmoid(float a, float firstPoint,float secondPoint)
+{
+ vector<float> tmp(2);
+ if (a>1||a<0)
+ {
+ fprintf(stderr," %f is outside the antiSigmoid value[0,1]",a);
+ exit(1);
+ }
+
+ float tmpfloat;
+ if (a>0.1)
+ {
+ tmpfloat=(1-a)/a;
+ tmpfloat=log(tmpfloat);
+ tmpfloat=6-tmpfloat;
+ }
+ else
+ {
+ tmpfloat=a/(1-a);
+ tmpfloat=log(tmpfloat);
+ tmpfloat=tmpfloat+6;
+ }
+ tmpfloat=(tmpfloat*(secondPoint-firstPoint))/12;
+ tmp[0]=tmpfloat+firstPoint;
+ tmp[1]=tmp[0];
+
+ return tmp;
+}
+
+vector<float>
+antiReverseSigmoid(float a, float firstPoint,float secondPoint)
+{
+ vector<float> tmp(2);
+ if (a>1||a<0)
+ {
+ fprintf(stderr," %f is outside the antiReverseSigmoid value[0,1]",a);
+ exit(1);
+ }
+
+ float tmpfloat=1-a;
+ tmp=antiSigmoid(tmpfloat,firstPoint,secondPoint);
+
+ return tmp;
+}
+
+vector<float>
+antiGaussian(float a,float firstPoint,float secondPoint)
+{
+ vector<float> tmp(2);
+ if (a>1||a<0)
+ {
+ fprintf(stderr," %f is outside the antiGaussian value[0,1]",a);
+ exit(1);
+ }
+
+ assert(a>0);
+
+ float tmpfloat=-2.0*log(a);
+ assert(tmpfloat>=0);
+ tmp[1]=sqrt(tmpfloat);
+ tmp[0]=-tmp[1];
+ tmp[0]=tmp[0]+4;
+ tmp[1]=tmp[1]+4;
+ tmp[0]=(tmp[0]*(secondPoint-firstPoint))/8+firstPoint;
+ tmp[1]=(tmp[1]*(secondPoint-firstPoint))/8+firstPoint;
+
+ return tmp;
+}
+
diff --git a/fs/memberf.h b/fs/memberf.h
new file mode 100644
index 0000000..c4009be
--- /dev/null
+++ b/fs/memberf.h
@@ -0,0 +1,72 @@
+#ifndef __MEMBERF_H__
+#define __MEMBERF_H__
+
+#include "vector.h"
+#include "mystring.h"
+
+// Listing 8.3 Definition of class FuzzyMember
+class FuzzyMember
+{
+private:
+ float startPoint;
+ float endPoint;
+ char *functionType;
+
+public:
+ //constructor
+ FuzzyMember():startPoint(0),endPoint(0),functionType(0) {}
+ FuzzyMember(float a,float b, char *str);
+ FuzzyMember(const FuzzyMember& a);
+
+ //destructor
+ ~FuzzyMember(){delete []functionType;}
+
+  //member function
+ float memberFunction(const float& a) const;
+ float not(const float& a) const;
+ vector<float> membership2input(const float& a) const ;
+ float get_startPoint() const {return startPoint;}
+ float get_endPoint() const {return endPoint;}
+ char* get_functionType() const {return functionType;}
+  int member_flag(const float& a) const;  //whether a belongs to this member set
+ int setTypeFlag() const; //0:unknown 1:leftT 2:rightT 3:T
+
+ FuzzyMember& change_member(const float& a,const float& b,char *str);
+
+ vector<float> centroid(const float& a,const float& b) const;
+
+ //operators
+ FuzzyMember& operator =(const FuzzyMember& a);
+ int operator ==(const FuzzyMember& a) const;
+ int operator < (const FuzzyMember& a) const;
+ //the FuzzyMember is left to a);
+ int operator > (const FuzzyMember& a) const;
+ //the FuzzyMember is right to a);
+
+ //friend operator I/O
+ friend istream& operator >> (istream& is,FuzzyMember& a);
+ friend ostream& operator << (ostream& os,const FuzzyMember& a);
+};
+
+inline FuzzyMember::FuzzyMember(float a,float b,char* str):
+ startPoint(a),endPoint(b)
+{
+ assert(startPoint<=endPoint);
+ int length=strlen(str)+2;
+
+ functionType= new char[length];
+ assert(functionType !=0);
+ strncpy(functionType,str,length);
+}
+
+inline FuzzyMember::FuzzyMember(const FuzzyMember& a):
+ startPoint(a.startPoint),endPoint(a.endPoint)
+{
+ assert(startPoint<=endPoint);
+ int length=strlen(a.functionType)+2;
+
+ functionType= new char[length];
+ assert(functionType !=0);
+ strncpy(functionType,a.functionType,length);
+}
+#endif
diff --git a/fs/mystring.cpp b/fs/mystring.cpp
new file mode 100644
index 0000000..0edf19f
--- /dev/null
+++ b/fs/mystring.cpp
@@ -0,0 +1,200 @@
+#include "headfile.h"
+#include "mystring.h"
+
+MyString::MyString(int a):
+ stringSize(a),currentPosition(0)
+{
+ stringPtr=new char[stringSize+1];
+ assert(stringPtr !=0);
+}
+
+MyString::MyString(char *str):
+ currentPosition(0)
+{
+ int length=strlen(str);
+ stringSize=length;
+ stringPtr= new char[stringSize+1];
+ assert(stringPtr !=0);
+ strncpy(stringPtr,str,length+1);
+}
+
+MyString::MyString(const MyString& a)
+{
+ stringSize=a.stringSize;
+ currentPosition=a.currentPosition;
+ stringPtr=new char[stringSize+1];
+ assert(stringPtr !=0);
+ strncpy(stringPtr,a.stringPtr,stringSize+1);
+}
+
+//member functions
+MyString&
+MyString::change_stringSize(const int& a)
+{
+ assert(a>=0);
+ stringSize=a;
+ return *this;
+}
+
+MyString&
+MyString::change_currentPosition(const int& a)
+{
+ assert(a>=0&&a<stringSize);
+ currentPosition=a;
+ return *this;
+}
+
+MyString&
+MyString::change_stringContent(char *str)
+{
+ delete []stringPtr;
+ int length=strlen(str);
+ stringSize=length;
+ currentPosition=0;
+ stringPtr=new char[stringSize+1];
+ assert(stringPtr !=0);
+ strncpy(stringPtr,str,length+1);
+
+ return *this;
+}
+
+int
+MyString::findNextF(char ch) const
+{
+ int tmp=-1;
+ for (int i=currentPosition;i<stringSize;i++)
+ {
+ if (stringPtr[i]==ch)
+ {
+ tmp=i;
+ break;
+ }
+ }
+ return tmp;
+}
+
+int
+MyString::findNextB(char ch) const
+{
+ int tmp=-1;
+ for (int i=currentPosition;i>=0;i--)
+ {
+ if (stringPtr[i]==ch)
+ {
+ tmp=i;
+ break;
+ }
+ }
+ return tmp;
+}
+
+int
+MyString::totalNumberF(char ch) const
+{
+ int sum=0;
+ for (int i=currentPosition;i<stringSize;i++)
+ {
+ if (stringPtr[i]==ch)
+ sum++;
+ }
+ return sum;
+}
+
+int
+MyString::totalNumberB(char ch) const
+{
+ int sum=0;
+ for (int i=currentPosition;i>=0;i--)
+ {
+ if (stringPtr[i]==ch)
+ sum++;
+ }
+ return sum;
+}
+
+MyString
+MyString::get_subString(const int& a)
+{
+ assert((currentPosition+a)<=stringSize);
+ MyString substr(a);
+ for (int i=currentPosition;i<(currentPosition+a);i++)
+ substr.stringPtr[i-currentPosition]=stringPtr[i];
+
+ substr.stringPtr[a]='\0';
+ return substr;
+}
+
+//operators
+
+char&
+MyString::operator [] (int i) const
+{
+ assert(i>=0&&i<stringSize);
+ return stringPtr[i];
+}
+
+MyString&
+MyString::operator =(const MyString& a)
+{
+ if ((&a)==this) return *this;
+ delete []stringPtr;
+ stringSize=a.stringSize;
+ currentPosition=a.currentPosition;
+ stringPtr=new char[stringSize+1];
+ assert(stringPtr !=0);
+ strncpy(stringPtr,a.stringPtr,stringSize+1);
+
+ return *this;
+}
+
+int
+MyString::operator ==(const MyString& a) const
+{
+ if ((&a)==this) return 1;
+
+ if (stringSize !=a.stringSize) return 0;
+
+ int tmp=1;
+ for (int i=0;i<stringSize;i++)
+ {
+ if (stringPtr[i] !=a.stringPtr[i])
+ {
+ tmp=0;
+ break;
+ }
+ }
+
+ return tmp;
+}
+
+
+// friend I/O operators
+
+ostream& operator <<(ostream& os,const MyString& a)
+{
+ os<<a.stringPtr<<endl;
+ return os;
+}
+
+istream& operator >>(istream& is,MyString& a)
+{
+ char tmp[256];
+ is>>tmp;
+ int length=strlen(tmp);
+ a.stringSize=length;
+ a.currentPosition=0;
+ delete []a.stringPtr;
+ a.stringPtr=new char[length+1];
+ assert(a.stringPtr !=0);
+ strncpy(a.stringPtr,tmp,length+1);
+
+ return is;
+}
+
+
+
+
+
+
+
+
diff --git a/fs/mystring.h b/fs/mystring.h
new file mode 100644
index 0000000..05eebca
--- /dev/null
+++ b/fs/mystring.h
@@ -0,0 +1,47 @@
+#ifndef __MYSTRING_H__
+#define __MYSTRING_H__
+
+// Listing 8.2 Definition of class MyString
+class MyString
+{
+private:
+ int stringSize;
+ char *stringPtr;
+ int currentPosition;
+
+public:
+ //constructors
+ MyString():stringSize(0),stringPtr(0),currentPosition(0) {}
+ MyString(int a);
+ MyString(char * str);
+ MyString(const MyString& a);
+
+ //destructor
+ ~MyString() {delete []stringPtr;}
+
+ //member functions
+ int get_stringSize() const {return stringSize;}
+ int get_currentPosition() const {return currentPosition;}
+ char* get_stringPtr() const {return stringPtr;}
+
+ MyString& change_stringSize(const int& a);
+ MyString& change_currentPosition(const int& a);
+ MyString& change_stringContent(char *str);
+
+ int findNextF(char ch) const;
+ int findNextB(char ch) const;
+ int totalNumberF(char ch) const;
+ int totalNumberB(char ch) const;
+ MyString get_subString(const int& a); //a: size of subString
+ //from current position
+
+ // operators
+ char& operator [] (int i) const;
+ MyString& operator =(const MyString& a);
+ int operator ==(const MyString& a) const;
+
+ //friend I/O operators
+ friend ostream& operator <<(ostream& os, const MyString& a);
+ friend istream& operator >>(istream& is, MyString& a);
+};
+#endif
diff --git a/fs/ruleset.cpp b/fs/ruleset.cpp
new file mode 100644
index 0000000..c4c193a
--- /dev/null
+++ b/fs/ruleset.cpp
@@ -0,0 +1,414 @@
+#include "headfile.h"
+#include "ruleset.h"
+
+//constructors
+FuzzyRuleSet::FuzzyRuleSet(int a):
+ ruleSetSize(a)
+{
+ rules = new FuzzyRule[ruleSetSize];
+ assert(rules !=0);
+}
+
+FuzzyRuleSet::FuzzyRuleSet(int a,FuzzyRule *b):
+ruleSetSize(a)
+{
+ rules=new FuzzyRule[ruleSetSize];
+ assert(rules !=0);
+ for (int i=0;i<ruleSetSize;i++)
+ rules[i]=b[i];
+}
+
+
+FuzzyRuleSet::FuzzyRuleSet(const FuzzyRuleSet& a):
+ruleSetSize(a.ruleSetSize)
+{
+ rules =new FuzzyRule[ruleSetSize];
+ assert(rules !=0);
+ for (int i=0;i<ruleSetSize;i++)
+ rules[i]=a.rules[i];
+}
+
+
+//member functions
+
+FuzzyRuleSet&
+FuzzyRuleSet::addRuleB(const FuzzyRule& a,const int& b)
+{
+ FuzzyRuleSet newSet(ruleSetSize,rules);
+ ruleSetSize++;
+
+ delete []rules;
+ rules =new FuzzyRule[ruleSetSize];
+ assert(rules !=0);
+
+ for (int i=0;i<b;i++)
+ rules[i]=newSet[i];
+
+ rules[b]=a;
+
+ for (i=b;i<(ruleSetSize-1);i++)
+ rules[i+1]=newSet[i];
+
+ return *this;
+}
+
+FuzzyRuleSet&
+FuzzyRuleSet::addRule(const FuzzyRule& a)
+{
+ FuzzyRuleSet newSet(ruleSetSize,rules);
+
+ delete []rules;
+ rules =new FuzzyRule[ruleSetSize+1];
+ assert(rules !=0);
+
+ for (int i=0;i<ruleSetSize;i++)
+ rules[i]=newSet[i];
+
+ rules[ruleSetSize]=a;
+
+ ruleSetSize++;
+ return *this;
+}
+
+FuzzyRuleSet&
+FuzzyRuleSet::deleteRule(const int& a)
+{
+ assert((a>=0)&&(a<ruleSetSize));
+ FuzzyRuleSet newSet(ruleSetSize,rules);
+
+ delete []rules;
+ ruleSetSize--;
+ rules =new FuzzyRule[ruleSetSize];
+ assert(rules !=0);
+ for (int i=0;i<a;i++)
+ rules[i]=newSet[i];
+
+ for (i=a;i<ruleSetSize;i++)
+ rules[i]=newSet[i+1];
+
+ return *this;
+}
+
+//a:input vector,
+//b: mode for antecedent--0:min 1:aver, c: mode for defuzzyfy--0:max 1:centroid without overlap
+//2: with overlap; output_add: add the contribution from each rule
+
+// Listing 8.11 Implementation of method fuzzyOutputValue_max() in class FuzzyRuleSet
+vector< vector<float> >
+FuzzyRuleSet::fuzzyOutputValue_max(const vector<float>& a, const int& b) const
+{
+ if (a.len() !=rules[0].get_variableSize())
+ {
+ fprintf(stderr,"input dim doesn't match the inputVariable no. of the rule");
+ exit(1);
+ }
+
+ int outVarDim=rules[0].get_outputSize(); //outputVariable no. in rules
+
+ vector< vector<float> > result(outVarDim);
+
+ vector<int> varDim(outVarDim);
+
+ for (int i=0;i<outVarDim;i++)
+ {
+ varDim[i]=rules[0].get_outputVariable(i).get_setSize();
+ result[i].changeSize(varDim[i]);
+ }
+
+ //initialization of result
+ for (i=0;i<outVarDim;i++)
+ for (int j=0;j<varDim[i];j++)
+ result[i][j]=0;
+
+ vector<float> tmpres(outVarDim);
+ for (i=0;i<ruleSetSize;i++)
+ {
+ int ter=rules[i].checkRuleActive(a);
+ if (ter==1)
+ {
+ vector<int> tmpvec=rules[i].get_outputSetFlag();
+
+ if (b==1)
+ tmpres=rules[i].FuzzyOutput_average(a);
+ else
+ tmpres=rules[i].FuzzyOutput(a);
+
+ for (int j=0;j<outVarDim;j++)
+ {
+ if (tmpvec[j] !=0)
+ result[j][abs(tmpvec[j])-1]=max(result[j][abs(tmpvec[j])-1],tmpres[j]);
+ }
+ }
+ }
+ return result;
+}
+
+vector< vector<float> >
+FuzzyRuleSet::fuzzyOutputValue_add(const vector<float>& a, const int& b) const
+{
+ if (a.len() !=rules[0].get_variableSize())
+ {
+ fprintf(stderr,"input dim doesn't match the inputVariable no. of the rule");
+ exit(1);
+ }
+
+ int outVarDim=rules[0].get_outputSize(); //outputVariable no. in rules
+
+ vector< vector<float> > result(outVarDim);
+
+ vector<int> varDim(outVarDim);
+
+ for (int i=0;i<outVarDim;i++)
+ {
+ varDim[i]=rules[0].get_outputVariable(i).get_setSize();
+ result[i].changeSize(varDim[i]);
+ }
+
+ //initialization of result
+ for (i=0;i<outVarDim;i++)
+ for (int j=0;j<varDim[i];j++)
+ result[i][j]=0;
+
+ vector<float> tmpres(outVarDim);
+ for (i=0;i<ruleSetSize;i++)
+ {
+ int ter=rules[i].checkRuleActive(a);
+ if (ter==1)
+ {
+ vector<int> tmpvec=rules[i].get_outputSetFlag();
+ if (b==1)
+ tmpres=rules[i].FuzzyOutput_average(a);
+ else
+ tmpres=rules[i].FuzzyOutput(a);
+
+ for (int j=0;j<outVarDim;j++)
+ {
+ if (tmpvec[j] !=0)
+ {
+ result[j][abs(tmpvec[j])-1] +=tmpres[j];
+ if ((result[j][abs(tmpvec[j])-1])>1)
+ result[j][abs(tmpvec[j])-1]=1;
+ }
+ }
+ }
+ }
+
+ return result;
+}
+
+// Listing 8.12 Implementation of method defuzzify() in class FuzzyRuleSet
+vector<float>
+FuzzyRuleSet::defuzzify(const vector< vector<float> >& a,const int& b) const
+{
+ //get output variables in a rule
+ int outVarDim=rules[0].get_outputSize();
+ vector<float> tmp(outVarDim);
+ vector<int> varDim(outVarDim);
+
+ for (int i=0;i<outVarDim;i++)
+ {
+		//fuzzy set no. in output variable i
+ varDim[i]=this->get_outputVariable(i).get_setSize();
+ //defuzzify for output variable i
+
+ if (b==0)
+ tmp[i]=this->get_outputVariable(i).defuzzifyMax(varDim[i],a[i]);
+ else if (b==1)
+ tmp[i]=this->get_outputVariable(i).defuzzyCentroid(varDim[i],a[i]);
+ else
+ tmp[i]=this->get_outputVariable(i).defuzzyCentroid_add(varDim[i],a[i]);
+
+ }
+ return tmp;
+}
+
+FuzzyVariable&
+FuzzyRuleSet::get_outputVariable(const int& a) const
+{
+ assert (a>=0&&a<rules[0].get_outputSize());
+ return rules[0].get_outputVariable(a);
+}
+
+int
+FuzzyRuleSet::checkRuleSetFired(const vector<float>& a) const
+{
+ assert(a.len()==rules[0].get_variableSize());
+ int sum=0;
+ for (int i=0;i<ruleSetSize;i++)
+ sum +=rules[i].checkRuleActive(a);
+ return (sum>0);
+}
+
+// Listing 8.13 Implementation of method output() in class FuzzyRuleSet
+vector<float>
+FuzzyRuleSet::output(const vector<float>& a, const int& b,const int& c,const int& d) const
+{ //a: input b:add/max c:min/aver d:max/without/with overlap
+ // return the value after defuzzify
+ if (a.len() !=rules[0].get_variableSize())
+ {
+ fprintf(stderr,"input dim doesn't match the inputVariable no. of the rule");
+ exit(1);
+ }
+
+ int outVarDim=rules[0].get_outputSize(); //outputVariable no. in rules
+ vector< vector<float> > result(outVarDim);
+
+ vector<int> varDim(outVarDim);
+
+ for (int i=0;i<outVarDim;i++)
+ {
+ varDim[i]=rules[0].get_outputVariable(i).get_setSize();
+ result[i].changeSize(varDim[i]);
+ } //allocate memory for result
+
+ if (b==1)
+ result=this->fuzzyOutputValue_max(a,c);
+ else
+ result= this->fuzzyOutputValue_add(a,c);
+
+ vector<float> tmp(outVarDim);
+ tmp=this->defuzzify(result,d);
+
+ return tmp;
+}
+
+
+vector<float>
+FuzzyRuleSet::output_new(const vector<float>& a, const int& b,const int& c,const int& d) const
+{ //a: input b:add/max c:min/aver d:max/without/with overlap
+ // return the value after defuzzify
+ if (a.len() !=rules[0].get_variableSize())
+ {
+ fprintf(stderr,"input dim doesn't match the inputVariable no. of the rule");
+ exit(1);
+ }
+
+ int outVarDim=rules[0].get_outputSize(); //outputVariable no. in rules
+ vector< vector<float> > result(outVarDim);
+ vector<int> varDim(outVarDim);
+
+ for (int i=0;i<outVarDim;i++)
+ {
+ varDim[i]=this->get_outputVariable(i).get_setSize();
+ result[i].changeSize(varDim[i]);
+ } //allocate memory for result
+
+ //initialization of result
+ for (i=0;i<outVarDim;i++)
+ for (int j=0;j<varDim[i];j++)
+ result[i][j]=0;
+
+ //get fuzzy output
+ vector<float> tmpres(outVarDim);
+ for (i=0;i<ruleSetSize;i++)
+ {
+ int ter=rules[i].checkRuleActive(a);
+ if (ter==1)
+ {
+ vector<int> tmpvec=rules[i].get_outputSetFlag();
+
+ if (c==1)
+ tmpres=rules[i].FuzzyOutput_average(a);
+ else
+ tmpres=rules[i].FuzzyOutput(a);
+
+ for (int j=0;j<outVarDim;j++)
+ {
+ if (tmpvec[j] !=0)
+ {
+ if (b==1)
+ result[j][abs(tmpvec[j])-1]=max(result[j][abs(tmpvec[j])-1],tmpres[j]);
+ else
+ {
+ result[j][abs(tmpvec[j])-1] +=tmpres[j];
+ if ((result[j][abs(tmpvec[j])-1])>1)
+ result[j][abs(tmpvec[j])-1]=1;
+ }
+ }
+ }
+ }
+ }
+
+ //defuzzy
+ vector<float> tmp(outVarDim);
+ for (i=0;i<outVarDim;i++)
+ {
+ //defuzzify for output variable i
+ if (d==0)
+ tmp[i]=this->get_outputVariable(i).defuzzifyMax(varDim[i],result[i]);
+ else if (d==1)
+ tmp[i]=this->get_outputVariable(i).defuzzyCentroid(varDim[i],result[i]);
+ else
+ tmp[i]=this->get_outputVariable(i).defuzzyCentroid_add(varDim[i],result[i]);
+ }
+
+ return tmp;
+}
+
+
+//operators
+
+FuzzyRule&
+FuzzyRuleSet::operator [] (int i) const
+{
+ assert((i>=0)&&(i<ruleSetSize));
+ return rules[i];
+}
+
+FuzzyRuleSet&
+FuzzyRuleSet::operator =(const FuzzyRuleSet& a)
+{
+ if ((&a)==this) return *this;
+
+ delete []rules;
+ ruleSetSize=a.ruleSetSize;
+ rules =new FuzzyRule[ruleSetSize];
+ assert(rules !=0);
+
+ for (int i=0;i<ruleSetSize;i++)
+ rules[i]=a[i];
+
+ return *this;
+}
+
+//I/O operators
+
+// Listing 8.14 Definition of operators << and >> in FuzzyRuleSet class
+istream& operator>>(istream& is, FuzzyRuleSet& a)
+{
+ is>>a.ruleSetSize;
+ if (a.rules !=0)
+ delete []a.rules;
+ a.rules =new FuzzyRule[a.ruleSetSize];
+ is>>a.rules[0];
+ vector<int> vecin(a.rules[0].get_variableSize());
+ vector<int> vecout(a.rules[0].get_outputSize());
+ for (int i=1;i<a.ruleSetSize;i++)
+ {
+ a.rules[i]=a.rules[0];
+ is>>vecin;
+ a.rules[i].change_inputSetFlag(vecin);
+ is>>vecout;
+ a.rules[i].change_outputSetFlag(vecout);
+ a.rules[i].form_ruleContent();
+ }
+
+ return is;
+}
+
+ostream& operator<<(ostream& os, const FuzzyRuleSet& a)
+{
+ assert(a.ruleSetSize !=0);
+ os<<a.ruleSetSize<<endl;
+ os<<a[0];
+
+ for (int i=1;i<a.ruleSetSize;i++)
+ {
+ os<<(a[i].get_inputSetFlag()|a[i].get_outputSetFlag());
+ }
+
+ return os;
+}
+
+
+
diff --git a/fs/ruleset.h b/fs/ruleset.h
new file mode 100644
index 0000000..72be15c
--- /dev/null
+++ b/fs/ruleset.h
@@ -0,0 +1,58 @@
+#ifndef __RULESET_H__
+#define __RULESET_H__
+
+#include "fuzzyrul.h"
+#include "variablf.h"
+#include "memberf.h"
+#include "vector.h"
+
+// Listing 8.10 Definition of class FuzzyRuleSet
+class FuzzyRuleSet
+{
+private:
+ int ruleSetSize; //how many rules in the set
+ FuzzyRule *rules; //pointers to the fuzzy rule set
+
+public:
+ FuzzyRuleSet():ruleSetSize(0),rules(0) {}
+ FuzzyRuleSet(int a);
+ FuzzyRuleSet(int a, FuzzyRule *b);
+ FuzzyRuleSet(const FuzzyRuleSet& a);
+
+ ~FuzzyRuleSet() {delete []rules;}
+
+ //member functions
+ int get_ruleSetSize() const {return ruleSetSize;}
+ FuzzyRuleSet& addRuleB(const FuzzyRule& a,const int& b); //add rule a at position b
+ FuzzyRuleSet& addRule(const FuzzyRule& a); //add rule a at the end of set
+ FuzzyRuleSet& deleteRule(const int& a); //delete the 'a'th rule
+
+ vector< vector<float> > fuzzyOutputValue_max(const vector<float>& a, const int& b) const;
+ vector< vector<float> > fuzzyOutputValue_add(const vector<float>& a, const int& b) const;
+ //a:input vector,
+ //b: mode for antecedent--0:min 1:aver,
+ vector<float> defuzzify(const vector< vector<float> >& a,const int& b) const;
+ //b: mode for defuzzyfy--0:max 1:centroid without overlap
+ //2: with overlap;
+ // a: fuzzy output values (for each fuzzy set of each output variable
+ vector<float> output(const vector<float>& a, const int& b,const int& c,const int& d) const;
+ //a: input b:add/max c:min/aver d:max/without/with overlap
+ // return the value after defuzzify
+
+ vector<float> output_new(const vector<float>& a, const int& b,const int& c,const int& d) const;
+
+ FuzzyVariable& get_outputVariable(const int& a) const;
+
+ int checkRuleSetFired(const vector<float>& a) const;
+ //check this rule set is fired or not due to a
+
+ //operators
+ FuzzyRule& operator [](int i) const;
+ FuzzyRuleSet& operator =(const FuzzyRuleSet& a);
+
+ //I/O operators
+ friend istream& operator>>(istream& is, FuzzyRuleSet& a);
+ friend ostream& operator<<(ostream& os, const FuzzyRuleSet& a);
+};
+
+#endif
diff --git a/fs/variablf.cpp b/fs/variablf.cpp
new file mode 100644
index 0000000..7ccbf0a
--- /dev/null
+++ b/fs/variablf.cpp
@@ -0,0 +1,495 @@
+#include "headfile.h"
+
+#include "memberf.h"
+#include "variablf.h"
+#include "vector.h"
+
+//constructors
+
+FuzzyVariable::FuzzyVariable(int a,float b,float c):
+setSize(a),startPoint(b),endPoint(c)
+{
+ fuzzySet= new FuzzyMember[setSize];
+ assert(fuzzySet !=0);
+ char *string="default";
+ int length=strlen(string)+2;
+ variableName= new char[length];
+ assert(variableName !=0);
+ strncpy(variableName,string,length);
+}
+
+FuzzyVariable::FuzzyVariable(int a,float b,float c,char *str):
+setSize(a),startPoint(b),endPoint(c)
+{
+ int length=strlen(str)+2;
+ variableName= new char[length];
+ assert(variableName !=0);
+ strncpy(variableName,str,length);
+
+ fuzzySet= new FuzzyMember[setSize];
+ assert(fuzzySet !=0);
+}
+
+
+
+FuzzyVariable::FuzzyVariable(const FuzzyVariable& a):
+setSize(a.setSize),startPoint(a.startPoint),endPoint(a.endPoint)
+{
+ fuzzySet= new FuzzyMember[setSize];
+ assert(fuzzySet !=0);
+
+ for (int i=0;i<setSize;i++)
+ fuzzySet[i]=a.fuzzySet[i];
+
+ int length=strlen(a.variableName)+2;
+ variableName= new char[length];
+ assert(variableName !=0);
+ strncpy(variableName,a.variableName,length);
+}
+
+//member functions
+
+FuzzyVariable&
+FuzzyVariable::standardVariable()
+{
+ float stepSize=(endPoint-startPoint)/(2*setSize);
+
+ delete []fuzzySet;
+ fuzzySet=new FuzzyMember[setSize];
+ assert(fuzzySet !=0);
+
+ FuzzyMember first(startPoint,startPoint+2*stepSize,"leftTriangle");
+
+ //most left rule
+ fuzzySet[0]=first;
+
+ //most right rule
+ first.change_member(endPoint-2*stepSize,endPoint,"rightTriangle");
+ fuzzySet[setSize-1]=first;
+
+ //rules inside
+ for (int i=1;i<(setSize-1);i++)
+ {
+ first.change_member(startPoint+(2*i-1)*stepSize,startPoint+(2*i+3)*stepSize,"Triangle");
+ fuzzySet[i]=first;
+ }
+ return *this;
+}
+
+
+FuzzyVariable&
+FuzzyVariable::change_setSize(const int& a)
+{
+ setSize=a;
+ return *this;
+}
+
+FuzzyVariable&
+FuzzyVariable::change_startPoint(const float& a)
+{
+ startPoint=a;
+ return *this;
+}
+
+FuzzyVariable&
+FuzzyVariable::change_endPoint(const float& a)
+{
+ endPoint=a;
+ return *this;
+}
+
+FuzzyVariable&
+FuzzyVariable::change_variableName(char *str)
+{
+ delete []variableName;
+ int length=strlen(str)+2;
+ variableName=new char[length];
+ strncpy(variableName,str,length);
+
+ return *this;
+}
+
+char*
+FuzzyVariable::setMeaning(const int& a,const int& b) const
+{
+ if (a!=setSize)
+ {
+ fprintf(stderr,"wrong setSize in member setMenaing");
+ exit(1);
+ }
+
+ char str[32];
+
+ if (a==3) //three sets for the variable
+ {
+ switch(b)
+ {
+ case 0:
+ strcpy(str,"Low");
+ break;
+ case 1:
+ strcpy(str,"Median");
+ break;
+ default:
+ strcpy(str,"High");
+ }
+ }
+ else if (a==5)
+ {
+ switch(b)
+ {
+ case 0:
+ strcpy(str,"veryLow");
+ break;
+ case 1:
+ strcpy(str,"Low");
+ break;
+ case 2:
+ strcpy(str,"Median");
+ break;
+ case 3:
+ strcpy(str,"High");
+ break;
+ default:
+ strcpy(str,"veryHigh");
+ }
+ }
+ else if (a==7)
+ {
+ switch(b)
+ {
+ case 0:
+ strcpy(str,"veryLow");
+ break;
+ case 1:
+ strcpy(str,"Low");
+ break;
+ case 2:
+ strcpy(str,"littleLow");
+ break;
+ case 3:
+ strcpy(str,"Median");
+ break;
+ case 4:
+ strcpy(str,"littleHigh");
+ break;
+ case 5:
+ strcpy(str,"High");
+ break;
+ default:
+ strcpy(str,"veryHigh");
+ }
+ }
+ else if (a==9)
+ {
+ switch(b)
+ {
+ case 0:
+ strcpy(str,"extremeLow");
+ break;
+ case 1:
+ strcpy(str,"veryLow");
+ break;
+ case 2:
+ strcpy(str,"Low");
+ break;
+ case 3:
+ strcpy(str,"littleLow");
+ break;
+ case 4:
+ strcpy(str,"Median");
+ break;
+ case 5:
+ strcpy(str,"littleHigh");
+ break;
+ case 6:
+ strcpy(str,"High");
+ break;
+ case 7:
+ strcpy(str,"veryHigh");
+ break;
+ default:
+ strcpy(str,"extremeHigh");
+ }
+ }
+ else
+ {
+ if (b>=10)
+ {
+ int temdec=b/10;
+ temdec=temdec+48;
+ strcpy(str,(char*)&temdec);
+ int tmp=b%10 +48;
+ strcat(str,(char*)&tmp);
+ }
+ else
+ {
+ int tmp=b+48;
+ strcpy(str,(char*)&tmp);
+ }
+ strcat(str,"th");
+ strcat(str,"Part");
+ }
+ char* tmp=str;
+ return tmp;
+}
+
+vector<int>
+FuzzyVariable::setFireFlag(const float& a) const
+{
+ vector<int> newVec(setSize);
+ for (int i=0;i<setSize;i++)
+ newVec[i]=this->fuzzySet[i].member_flag(a);
+
+ return newVec;
+}
+
+float
+FuzzyVariable::output(const float& a, const int& b) const
+{
+ assert(b !=0);
+ vector<int> vec=this->setFireFlag(a);
+ if (vec[abs(b)-1]==0)
+ {
+ fprintf(stderr,"try to fire a unfired set");
+ exit(1);
+ }
+ float tmp;
+ if (b>0)
+ tmp=this->fuzzySet[b-1].memberFunction(a);
+ else
+ tmp=this->fuzzySet[abs(b)-1].not(a);
+
+ return tmp;
+}
+
+float
+FuzzyVariable::defuzzifyMax(const int& a,const vector<float>& b) const
+{
+ assert((a==setSize)&&(a==b.len()));
+ //check whther b is zero vector
+ int sum=0;
+ for (int i=0;i<b.len();i++)
+ {
+ if (b[i]>0)
+ sum++;
+ }
+
+ if (sum==0)
+ {
+ cerr<<"try to defuzzy while no rule fired in defuzzifyMax in variablf.cpp"<<endl;
+ exit(1);
+ }
+
+ int ind=b.maximum_index();
+
+ float first=fuzzySet[ind].get_startPoint();
+ float second=fuzzySet[ind].get_endPoint();
+ return (first+(second-first)/2);
+}
+
+int
+FuzzyVariable::defuzzyMax_index(const int&a, const vector<float>& b) const
+{
+ assert((a==setSize)&&(a==b.len()));
+
+ int sum=0;
+ for (int i=0;i<b.len();i++)
+ {
+ if (b[i]>0)
+ sum++;
+ }
+
+ if (sum==0)
+ {
+ cerr<<"try to defuzzy while no rule fired in defuzzyMax_index in variablf.cpp"<<endl;
+ exit(1);
+ }
+
+ return b.maximum_index();
+}
+
+float
+FuzzyVariable::defuzzyCentroid_add(const int& a,const vector<float>& b) const
+{
+ assert((a==setSize)&&(a==b.len()));
+
+	//check whether b is zero vector
+ int sum=0;
+ for (int i=0;i<b.len();i++)
+ {
+ if (b[i]>0)
+ sum++;
+ }
+
+ if (sum==0)
+ {
+ cerr<<"try to defuzzy while no rule fired in defuzzyCentroid_add in variablf.cpp"<<endl;
+ exit(1);
+ }
+
+ float stepsize=(endPoint-startPoint)/100; //divide into 100 intervals
+ vector<float> res(2); //first:nom second: dem
+ res[0]=0;
+ res[1]=0;
+ for (i=0;i<setSize;i++)
+ {
+ if (b[i] !=0)
+ res +=fuzzySet[i].centroid(b[i],stepsize);
+ }
+ //maybe should add the left and right shoulder's affect
+ int totalstep;
+ if (b[0] !=0)
+ {
+ if ((strncmp(fuzzySet[0].get_functionType(),"leftTriangle",strlen("leftTriangle")-1)==1)||(strncmp(fuzzySet[0].get_functionType(),"reverseSigmoid",strlen("reverseSigmoid")-1)==1))
+ {
+ totalstep=(fuzzySet[0].get_startPoint()-startPoint)/stepsize;
+ assert(totalstep>=0);
+ for (i=0;i<totalstep;i++)
+ {
+ res[0] +=(startPoint+i*stepsize)*b[0];
+ res[1] +=b[0];
+ }
+ }
+ }
+
+ if (b[setSize-1] !=0)
+ {
+ if ((strncmp(fuzzySet[setSize-1].get_functionType(),"rightTriangle",strlen("rightTriangle")-1)==1)||(strncmp(fuzzySet[setSize-1].get_functionType(),"Sigmoid",strlen("Sigmoid")-1)==1))
+ {
+ float tmp=fuzzySet[setSize-1].get_endPoint();
+ totalstep=(endPoint-tmp)/stepsize;
+ assert(totalstep>=0);
+ for (i=0;i<totalstep;i++)
+ {
+ res[0] +=(tmp+i*stepsize)*b[setSize-1];
+ res[1] +=b[setSize-1];
+ }
+ }
+ }
+ return res[0]/res[1];
+}
+
+float
+FuzzyVariable::defuzzyCentroid(const int& a,const vector<float>& b) const
+{
+ assert((a==setSize)&&(a==b.len()));
+
+	//check whether b is zero vector
+ int sum=0;
+ for (int i=0;i<b.len();i++)
+ {
+ if (b[i]>0)
+ sum++;
+ }
+
+ if (sum==0)
+ {
+ cerr<<"try to defuzzy while no rule fired in defuzzyCentroid in variablf.cpp"<<endl;
+ exit(1);
+ }
+
+ float stepsize=(endPoint-startPoint)/100; //divide into 100 intervals
+
+ vector<int> vecint(setSize); //store setFireFlag
+ vector<float> vecfloat(setSize); //store membership value
+ float tmp,num,dem; //tmp: set value, num &dem for centroid
+ num=0.0;
+ dem=0.0;
+ for (i=0;i<100;i++)
+ {
+ tmp=startPoint+i*stepsize;
+ vecint=this->setFireFlag(tmp); //get setFireFlag
+
+ for (int j=0;j<setSize;j++)
+ {
+ vecfloat[j]=0;
+ if (vecint[j]==1) //this set fired?
+ {
+ vecfloat[j]=fuzzySet[j].memberFunction(tmp); //membership value
+ if (vecfloat[j]>b[j]) //compare two membership values
+ vecfloat[j]=b[j]; //minimum
+ }
+ }
+
+ int ind=vecfloat.maximum_index(); //maximum index
+ num +=tmp*(vecfloat[ind]);
+ dem +=1.0*(vecfloat[ind]);
+ }
+
+ return num/dem; //centroid
+}
+
+
+//operators
+
+FuzzyMember&
+FuzzyVariable::operator [] (int i) const
+{
+ assert(i>=0&&i<setSize);
+ return fuzzySet[i];
+}
+
+FuzzyVariable&
+FuzzyVariable::operator =(const FuzzyVariable& a)
+{
+ if ((&a)==this) return *this;
+
+ delete []fuzzySet;
+ delete []variableName;
+
+ setSize=a.setSize;
+ startPoint=a.startPoint;
+ endPoint=a.endPoint;
+
+ fuzzySet=new FuzzyMember[setSize];
+ assert(fuzzySet !=0);
+
+ for (int i=0;i<setSize;i++)
+ fuzzySet[i]=a.fuzzySet[i];
+
+ int length=strlen(a.variableName)+2;
+ variableName =new char[length];
+ assert(variableName !=0);
+ strncpy(variableName,a.variableName,length);
+
+ return *this;
+}
+
+//friend I/O
+
+ostream& operator << (ostream& os, const FuzzyVariable& a)
+{
+ os<<a.variableName<<"\t";
+
+ os<<a.setSize<<"\t";
+ os<<a.startPoint<<"\t";
+ os<<a.endPoint<<endl;
+
+ //os<<"FuzzySets:"<<endl;
+ for (int i=0;i<a.setSize;i++)
+ os<<a.fuzzySet[i];
+ return os;
+}
+
+istream& operator >>(istream& is, FuzzyVariable& a)
+{
+ char str[256];
+ int tmpsetSize;
+ float tmpstart,tmpend;
+
+ is>>str>>tmpsetSize>>tmpstart>>tmpend;
+
+ FuzzyVariable tmpVariable(tmpsetSize,tmpstart,tmpend,str);
+
+ for (int i=0;i<tmpsetSize;i++)
+ is>>tmpVariable.fuzzySet[i];
+
+ a=tmpVariable;
+
+ return is;
+}
+
+
+
+
diff --git a/fs/variablf.h b/fs/variablf.h
new file mode 100644
index 0000000..ad10958
--- /dev/null
+++ b/fs/variablf.h
@@ -0,0 +1,61 @@
+#ifndef __VARIABLF_H__
+#define __VARIABLF_H__
+
+#include "memberf.h"
+#include "vector.h"
+
+// Listing 8.6 Definition of class FuzzyVariable
+class FuzzyVariable
+{
+private:
+ int setSize;
+ float startPoint;
+ float endPoint;
+ char *variableName;
+ FuzzyMember *fuzzySet;
+
+public:
+ //constructors
+ FuzzyVariable():setSize(0),startPoint(0),endPoint(0),variableName(0),fuzzySet(0){}
+ FuzzyVariable(int a,float b,float c);
+ FuzzyVariable(int a,float b,float c,char *str);
+ FuzzyVariable(const FuzzyVariable& a);
+
+ //destructor
+ ~FuzzyVariable(){delete []fuzzySet;delete []variableName;}
+
+ //member functions
+ FuzzyVariable& standardVariable();
+ char* get_variableName() const {return variableName;}
+ int get_setSize() const {return setSize;}
+ float get_startPoint() const {return startPoint;}
+ float get_endPoint() const {return endPoint;}
+
+ FuzzyVariable& change_setSize(const int& a);
+ FuzzyVariable& change_startPoint(const float& a);
+ FuzzyVariable& change_endPoint(const float& a);
+ FuzzyVariable& change_variableName(char *str);
+
+ char* setMeaning(const int& a,const int& b) const; //a:setSize b:which set
+ vector<int> setFireFlag(const float& a) const ;
+ float output(const float& a,const int& b) const; //b:set being chosen, output of set b
+ float defuzzifyMax(const int& a,const vector<float>& b) const;
+ //return the value
+ int defuzzyMax_index(const int& a,const vector<float>& b) const;
+ //return the set index
+ float defuzzyCentroid_add(const int& a,const vector<float>& b) const;
+ float defuzzyCentroid(const int& a,const vector<float>& b) const;
+
+ //operators
+ FuzzyMember& operator [] (int i) const;
+ FuzzyVariable& operator =(const FuzzyVariable& a);
+
+ //friend operator I/O
+ friend istream& operator >> (istream& is,FuzzyVariable& a);
+ friend ostream& operator << (ostream& os,const FuzzyVariable& a);
+};
+
+#endif
+
+
+
diff --git a/fs/vector.h b/fs/vector.h
new file mode 100644
index 0000000..a0285b9
--- /dev/null
+++ b/fs/vector.h
@@ -0,0 +1,212 @@
+#ifndef __VECTOR_H__
+#define __VECTOR_H__
+
+#include "headfile.h"
+
+// Listing 8.1 Definition of template class vector
+template <class Type>
+class vector
+{
+private:
+ int row; //length of array (column);
+ Type *arr; //pointer to the array;
+public:
+ //constructors
+ vector():row(0),arr(0) {}
+ vector(int a);
+ vector(const vector<Type>& a);
+ vector(int a,Type* b);
+ ~vector(){delete []arr;}
+
+ //operators
+ vector<Type>& operator =(const vector<Type>& a);
+ vector<Type>& operator +=(const vector<Type>& a);
+ int operator !=(const vector<Type>& a) const;
+ int operator <(const vector<Type>& a) const {return (row<a.row);}
+
+ Type& operator [] (int i) const {assert(i>=0&&i<row); return arr[i];}
+
+ //member functions
+ int len() const {return row;}
+ Type sum() const;
+ int maximum_index() const;
+ vector<Type>& changeSize(const int& a);
+ Type minimum() const;
+
+ friend vector<Type> operator |(const vector<Type>& a,const vector<Type>& b);
+ friend istream& operator >> (istream& is,vector<Type>& a);
+ friend ostream& operator << (ostream& os, const vector<Type>& a);
+};
+
+
+template <class Type>
+vector<Type>::vector(int a):
+ row(a)
+{
+ arr=new Type [row];
+ assert(arr!=0);
+}
+
+template <class Type>
+vector<Type>::vector(int a,Type* b):
+ row(a)
+{
+ arr=new Type[row];
+ assert(arr !=0);
+ for (int i=0;i<row;i++)
+ arr[i]=b[i];
+}
+
+
+template <class Type>
+vector<Type>::vector(const vector<Type>& a):
+ row(a.row)
+{
+ arr = new Type [row];
+ assert(arr!=0);
+ for (int i=0;i<row;i++)
+ arr[i]=a[i];
+}
+
+template <class Type>
+vector<Type>&
+vector<Type>::changeSize(const int& a)
+{
+ delete []arr;
+ row=a;
+ arr=new Type[row];
+ assert(arr!=0);
+ return *this;
+}
+
+
+template <class Type>
+vector<Type>&
+vector<Type>::operator =(const vector<Type>& a)
+{
+ if ((&a)==this) return *this;
+
+ delete []arr;
+ row=a.row;
+ arr = new Type [row];
+ assert(arr!=0);
+ for (int i=0;i<row;i++)
+ arr[i]=a[i];
+ return *this;
+}
+
+template <class Type>
+vector<Type>&
+vector<Type>::operator +=(const vector<Type>& a)
+{
+ assert(row==a.row);
+ for (int i=0;i<row;i++)
+ arr[i] +=a[i];
+
+ return *this;
+}
+
+template <class Type>
+int
+vector<Type>::operator !=(const vector<Type>& a) const
+{
+ if ((&a)==this) return 0;
+
+ if (row !=a.row) return 1;
+
+ for (int i=0;i<row;i++)
+ {
+ if (arr[i] !=a[i]) return 1;
+ }
+ return 0;
+}
+
+//member functions
+
+template <class Type>
+Type
+vector<Type>::sum() const
+{
+ Type tmp=arr[0];
+
+ for (int i=1;i<row;i++)
+ tmp +=arr[i];
+
+ return tmp;
+}
+
+template <class Type>
+int
+vector<Type>::maximum_index() const
+{
+ Type max=arr[0];
+ int ind=0;
+
+ for (int i=1;i<row;i++)
+ {
+ if (max<arr[i])
+ {
+ max=arr[i];
+ ind=i;
+ }
+ }
+
+ return ind;
+}
+
+template <class Type>
+Type
+vector<Type>:: minimum() const
+{
+ Type mini=arr[0];
+ for (int i=1;i<row;i++)
+ {
+ if (arr[i]<mini)
+ mini=arr[i];
+ }
+
+ return mini;
+}
+
+
+//friend operators
+template <class Type>
+vector<Type> operator | (const vector<Type>& a,const vector<Type>& b)
+{
+ vector<Type> newVec(a.row+b.row);
+ for (int i=0;i<a.row;i++)
+ newVec[i]=a[i];
+ for (i=0;i<b.row;i++)
+ newVec[a.row+i]=b[i];
+ return newVec;
+}
+
+template <class Type>
+istream& operator >> (istream& is,vector<Type>& a)
+{
+ for (int i=0;i<a.row;i++)
+ is >> a[i];
+ return is;
+}
+
+template <class Type>
+ostream& operator << (ostream& os,const vector<Type>& a)
+{
+ int sum=0;
+ for (int j=0;j<a.row;j++)
+ {
+ os << a[j]<<"\t";
+ sum++;
+ if ((sum%8)==0)
+ {
+ os <<endl;
+ sum=0;
+ }
+ }
+ os<<endl;
+ return os;
+}
+
+#endif // _vector_h_
+
+
|
btbytes/ci
|
4912294e4c419d386b4c8b059e3c6f98097ce4a5
|
added Evolutionary back-propagation neural network
|
diff --git a/pso_nn/Makefile b/pso_nn/Makefile
new file mode 100644
index 0000000..c940796
--- /dev/null
+++ b/pso_nn/Makefile
@@ -0,0 +1,2 @@
+all: bp.h definiti.h headfile.h mem_loc.h nnet.h psostate.h sigmoid.h
+ gcc -Wall -lm bp.c main.c mem_loc.c psostate.c sigmoid.c -o pso_nn
diff --git a/pso_nn/bp.c b/pso_nn/bp.c
new file mode 100644
index 0000000..a8bcb8e
--- /dev/null
+++ b/pso_nn/bp.c
@@ -0,0 +1,1213 @@
+#include "headfile.h"
+#include "definiti.h"
+#include "bp.h"
+#include "mem_loc.h"
+#include "sigmoid.h"
+
+
+#define MAX_NUM_CHARS 100
+
+/**************************************************************/
+/* Static Variable and Const Variable with File level scope */
+/**************************************************************/
+ static BP_Type bp;
+ static BP_Pattern_Set_Type patset;
+ static BP_State_Type bp_cur_state;
+
+ static char pat_file_name[MAX_NUM_CHARS];
+ static float *target_out;
+ static int pso_dimension;
+/**************************************************************/
+/* Function Prototypes for functions with file level scope */
+/**************************************************************/
+static void read_bp_parameters(char *dataFile); // read BP parameters from I/O file
+static void allocate_bp_memory(void); // allocate BP memory spaces
+static void free_bp_memory(void); // free BP memory spaces
+static void store_bp_results(void); // write BP results to I/O file
+
+static void bp_initialize(void);
+static void bp_initialize_weights(void);
+static void bp_state_handler(int); // BP state handle routine
+
+static void bp_get_pattern(void);
+static void bp_feedforward_input(void);
+static void bp_feedforward_hidden(void);
+static void bp_feedforward_output(void);
+static void bp_back_propagation_output(void);
+static void bp_back_propagation_hiddens(void);
+static void bp_batch_temp_weight_step_change(void);
+static void bp_next_pattern(void);
+static void bp_weight_step_change(void);
+static void bp_weight_change(void);
+static void bp_next_generation(void);
+static void bp_update_learning_rate(void);
+static void bp_update_momentum_rate(void);
+static void bp_training_done(void);
+static void bp_recall_done(void);
+
+static float activate_function(float, int);
+static float nn_linear_function(float);
+static float nn_gausian_function(float);
+static float nn_sigmoid_function(float);
+
+static void print_net_parameters(void);
+static void print_mse(void);
+static void update_recall_result(void);
+
+/**************************************************************/
+/* Function Definitions */
+/**************************************************************/
+
+
+/**************************************************************/
+/* BP Start and clean routines and interfaces */
+/**************************************************************/
+
+/**************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+***************************************************************/
+
+void BP_Start_Up (char *dataFile)
+{
+ read_bp_parameters(dataFile);
+ allocate_bp_memory(); // allocate memory for BP
+ bp_initialize();
+}
+
+/*************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+void BP_Clean_Up (void)
+{
+ store_bp_results();
+ free_bp_memory(); // free memory space of BP
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+void BP_Weights_From_PSO (float *vec)
+{ int idx_layer, idx_cn, idx_pn;
+ int counter = 0;
+
+ for (idx_layer = 1; idx_layer < (bp.arch.size) ; idx_layer++ )
+ { // loop through the layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop through the neurons in the current layer
+ for (idx_pn = 0; idx_pn <= (bp.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ bp.arch.layers[idx_layer].neurons[idx_cn].w[idx_pn] = vec[counter++];
+ //fscanf(fout,"%f",&(bp.arch.layers[idx_layer].neurons[idx_cn].w[idx_pn]));
+ }
+ }
+ }
+ if (counter != pso_dimension)
+ {
+ printf("not match in BP_Weights_From_PSO routine 1 \n");
+ exit(1);
+ }
+}
+
+// Listing 6.15 The BP_Get_PSO_Dimension() routine
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+int BP_Get_PSO_Dimension (void)
+{
+ int idx_l;
+ pso_dimension = 0;
+
+ for (idx_l = 0; idx_l < (bp.arch.size - 1); idx_l++)
+ {
+ pso_dimension += ((bp.arch.layers[idx_l].size + 1) * bp.arch.layers[idx_l + 1].size);
+ }
+ return(pso_dimension);
+}
+
+/************************************************************/
+/* BP functons */
+/************************************************************/
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+float BP_Main_Loop (void)
+{
+ BOOLEAN running;
+
+ bp.mse = 0.0; // clean mean squared error
+ // bp randomly initialize weights
+
+ // bp initial setting
+ bp_cur_state = BP_GET_PATTERN;
+ bp.env.cur_gen = 0;
+ bp.env.cur_pat = 0;
+
+ running = TRUE;
+ while (running)
+ {
+ if ((bp_cur_state == BP_TRAINING_DONE) || (bp_cur_state == BP_RECALL_DONE) )
+ {
+ running = FALSE;
+ }
+ bp_state_handler(bp_cur_state);
+ }
+ return(bp.mse); // bp.mse should be replaced with recognition error here
+
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void read_bp_parameters (char *dataFile)
+{
+ int idx_i;
+ FILE *frun;
+
+ // open the runfile to input parameters
+ if ((frun=fopen(dataFile,"r"))==NULL)
+ {
+ printf("Cant read run file");
+ exit(1);
+ }
+
+ // read BP's parameters from run file
+ // read BP's environment data
+ //fscanf(frun, "%d",&(bp.env.operation_mode)); // training or recall
+ //fscanf(frun, "%d",&(bp.env.train_mode)); // training mode if in training operation mode
+ //fscanf(frun, "%f",&(bp.env.alpha)); // learning rate
+ //fscanf(frun, "%f",&(bp.env.gama)); // momentum rate
+ //fscanf(frun, "%f",&(bp.env.criterion)); // training error criterion for termination
+ //fscanf(frun, "%d",&(bp.env.max_gen)); // maximum number of generations
+ //fscanf(frun, "%d",&(bp.env.max_tra_pat)); // total number of training patterns
+
+ // read BP's Arch
+ fscanf(frun, "%d",&(bp.arch.size)); // number of layers
+ // allocate memory for numbers of neurons in hidden layers
+ bp.arch.hidden_number = calloc((bp.arch.size - 2),sizeof(int));
+ if (!(bp.arch.hidden_number))
+ {
+ printf("Allocation error in read_bp_parameters() - aborting.");
+ exit(1);
+ }
+ // read in numbers of neurons in hidden layers
+ for (idx_i = 0 ; (idx_i < (bp.arch.size - 2)) ; idx_i++)
+ {
+ fscanf(frun, "%d",&(bp.arch.hidden_number[idx_i]));
+ }
+
+ // read in/out pattern parameters
+ fscanf(frun, "%d",&(patset.size)); // number of pattern
+ // read pattern input dimension
+ fscanf(frun, "%d",&(patset.dim_in)); // input dimension
+ // read pattern output dimension
+ fscanf(frun, "%d",&(patset.dim_out)); // output dimension
+ // read pattern file name
+ fscanf(frun, "%s", pat_file_name);
+
+ fclose(frun);
+
+ bp.env.operation_mode = 1;
+ bp.env.train_mode = 0;
+ bp.env.alpha = 0.075;
+ bp.env.gama = 0.15;
+ bp.env.criterion = 0.01;
+ bp.env.max_gen = 1;
+ bp.env.max_tra_pat = 1;
+
+ print_net_parameters();
+
+}
+
+static void print_net_parameters (void)
+{
+ int idx_i;
+ // test
+ //printf( "%d\n",(bp.env.operation_mode)); // training or recall
+ //printf( "%d\n",(bp.env.train_mode)); // training mode if in training operation mode
+ //printf( "%f\n",(bp.env.alpha)); // learning rate
+ //printf( "%f\n",(bp.env.gama)); // momentum rate
+ //printf( "%f\n",(bp.env.criterion)); // training error criterion for termination
+ //printf( "%d\n",(bp.env.max_gen)); // maximum number of generations
+ //printf( "%d\n\n",(bp.env.max_tra_pat)); // total number of training patterns
+
+ printf( "number of layers: %d\n",(bp.arch.size)); // number of layers
+ for (idx_i = 0 ; (idx_i < (bp.arch.size - 2)) ; idx_i++)
+ {
+ printf( "number of hidden layer %d's neurons %d\n",idx_i, bp.arch.hidden_number[idx_i]);
+ }
+ printf( "number of patterns: %d\n",(patset.size)); // number of pattern
+ // read pattern input dimension
+ printf( "pattern input dimension: %d\n",(patset.dim_in)); // input dimension
+ // read pattern output dimension
+ printf( "pattern output dimnesion: %d\n",(patset.dim_out)); // output dimension
+ // read pattern file name
+ printf( "pattern file name: %s\n", pat_file_name);
+}
+
+static void print_mse (void)
+{
+ printf("%f\n",bp.mse); // output mean squared error
+}
+
+static void update_recall_result(void)
+{
+ int idx_out, res_nn, res_ta;
+ float max = 0.0;
+
+ for (idx_out = 0; idx_out < (bp.arch.layers[bp.arch.size - 1].size ); idx_out++)
+ { // loop throught the neurons of the output layer
+ if (bp.arch.layers[bp.arch.size - 1].neurons[idx_out].out > max )
+ {
+ max = bp.arch.layers[bp.arch.size - 1].neurons[idx_out].out;
+ res_nn = idx_out;
+ }
+ }
+
+ max = 0.0;
+ for (idx_out = 0; idx_out < (bp.arch.layers[bp.arch.size - 1].size ); idx_out++)
+ { // loop throught the neurons of the output layer
+ if (target_out[idx_out] > max)
+ {
+ max = target_out[idx_out];
+ res_ta = idx_out;
+ }
+ }
+ if (res_nn == res_ta)
+ {
+ bp.mse += 1;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void allocate_bp_memory (void)
+{
+ int idx, idx_i;
+
+ // allocate memory for BP net
+ bp.arch.layers = calloc(bp.arch.size,sizeof(NN_Layer_Arch_Type));
+
+ //allocate memory for input layer
+ bp.arch.layers[0].size = patset.dim_in;
+ bp.arch.layers[0].neurons = calloc(patset.dim_in,sizeof(Neuron_Type));
+ bp.arch.layers[0].layer_type = NN_INPUT_LAYER;
+ // specify and allocate memory for neurons of input layer
+ for (idx =0 ; idx < bp.arch.layers[0].size ; idx++)
+ {
+ bp.arch.layers[0].neurons[idx].neuron_function = NN_LINEAR_FUNCTION;
+ FVectorAllocate(&(bp.arch.layers[0].neurons[idx].delta_w),1);
+ FVectorAllocate(&(bp.arch.layers[0].neurons[idx].temp_delta_w),1);
+ FVectorAllocate(&(bp.arch.layers[0].neurons[idx].w),1);
+ }
+
+ // allocate memory for hidden layers
+ for (idx =0 ; idx < (bp.arch.size - 2);idx++ )
+ {
+ bp.arch.layers[idx + 1].size = bp.arch.hidden_number[idx];
+ bp.arch.layers[idx + 1].neurons = calloc(bp.arch.layers[idx + 1].size,sizeof(Neuron_Type));
+ bp.arch.layers[idx + 1].layer_type = NN_HIDDEN_LAYER;
+ // specify and allocate memory for neurons of hiddem layer
+ for (idx_i =0 ; idx_i < bp.arch.layers[idx + 1].size ; idx_i++)
+ {
+ bp.arch.layers[idx + 1].neurons[idx_i].neuron_function = NN_SIGMOID_FUNCTION;
+ FVectorAllocate(&(bp.arch.layers[idx + 1].neurons[idx_i].delta_w),bp.arch.layers[idx].size + 1); // add one bias
+ FVectorAllocate(&(bp.arch.layers[idx + 1].neurons[idx_i].temp_delta_w),bp.arch.layers[idx].size + 1);
+ FVectorAllocate(&(bp.arch.layers[idx + 1].neurons[idx_i].w),bp.arch.layers[idx].size + 1);
+ }
+ }
+
+ // allocate memory for output layer
+ bp.arch.layers[bp.arch.size - 1].size = patset.dim_out;
+ bp.arch.layers[bp.arch.size - 1].neurons = calloc(patset.dim_out,sizeof(Neuron_Type));
+ bp.arch.layers[bp.arch.size - 1].layer_type = NN_OUTPUT_LAYER;
+ // specify and allocate memory for neurons of output layer
+ for (idx =0 ; idx < bp.arch.layers[bp.arch.size - 1].size ; idx++)
+ {
+ bp.arch.layers[bp.arch.size - 1].neurons[idx].neuron_function = NN_SIGMOID_FUNCTION;
+ FVectorAllocate(&(bp.arch.layers[bp.arch.size - 1].neurons[idx].delta_w),bp.arch.layers[bp.arch.size - 2].size +1);
+ FVectorAllocate(&(bp.arch.layers[bp.arch.size - 1].neurons[idx].temp_delta_w),bp.arch.layers[bp.arch.size - 2].size +1);
+ FVectorAllocate(&(bp.arch.layers[bp.arch.size - 1].neurons[idx].w),bp.arch.layers[bp.arch.size - 2].size + 1);
+ }
+
+ // allocate memory for pattern set
+ FMatrixAllocate(&(patset.patterns),patset.size,(patset.dim_in + patset.dim_out));
+
+ // allocate memory for target output
+ target_out = calloc(patset.dim_out,sizeof(float));
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void free_bp_memory (void)
+{
+ int idx, idx_i;
+ // free memory for pattern set
+ FMatrixFree(patset.patterns,patset.size);
+
+ // free memory for BP net
+ // free memory for output layer
+ for (idx =0 ; idx < bp.arch.layers[bp.arch.size - 1].size ; idx++)
+ {
+ free(bp.arch.layers[bp.arch.size - 1].neurons[idx].w);
+ free(bp.arch.layers[bp.arch.size - 1].neurons[idx].delta_w);
+ free(bp.arch.layers[bp.arch.size - 1].neurons[idx].temp_delta_w);
+ }
+ free(bp.arch.layers[bp.arch.size - 1].neurons);
+
+ // free memory for hidden layers
+ for (idx =0 ; idx < (bp.arch.size - 2);idx++ )
+ {
+ for (idx_i =0 ; idx_i < bp.arch.layers[idx + 1].size ; idx_i++)
+ {
+ free(bp.arch.layers[idx + 1].neurons[idx_i].w);
+ free(bp.arch.layers[idx + 1].neurons[idx_i].delta_w);
+ free(bp.arch.layers[idx + 1].neurons[idx_i].temp_delta_w);
+ }
+ free(bp.arch.layers[idx + 1].neurons);
+ }
+ // free memory for input layer
+ for (idx =0 ; idx < bp.arch.layers[0].size ; idx++)
+ {
+ free(bp.arch.layers[0].neurons[idx].w);
+ free(bp.arch.layers[0].neurons[idx].delta_w);
+ free(bp.arch.layers[0].neurons[idx].temp_delta_w);
+ }
+ free(bp.arch.layers[0].neurons);
+
+ free(bp.arch.layers);
+ free(bp.arch.hidden_number);
+
+ free(target_out);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_initialize (void)
+{
+ int idx_i,idx_j;
+ int stime;
+ long ltime;
+ FILE *frun;
+
+ // get the current calendar time
+ ltime = time(NULL);
+ stime = (unsigned) ltime/2;
+ srand(stime);
+
+ // bp read patterns from file
+ // open the pattern file to obtain in/out patterns
+ if ((frun=fopen(pat_file_name,"r"))==NULL)
+ {
+ printf("Cant read pattern file");
+ exit(1);
+ }
+
+ for (idx_i = 0; idx_i < patset.size; idx_i++)
+ {
+ for (idx_j = 0; idx_j < (patset.dim_in + patset.dim_out) ; idx_j++)
+ {
+ fscanf(frun, "%f",&(patset.patterns[idx_i][idx_j]));
+ //printf("%f\t",patset.patterns[idx_i][idx_j]);
+ }
+ //printf("\n");
+ }
+ fclose(frun);
+
+ bp.mse = 0.0; // clean mean squared error
+ // bp randomly initialize weights
+ //bp_initialize_weights();
+
+ // bp initial setting
+ bp_cur_state = BP_GET_PATTERN;
+ bp.env.cur_gen = 0;
+ bp.env.cur_pat = 0;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_initialize_weights (void)
+{
+ int idx_layer,idx_neuron,idx_weight;
+ float prob;
+ int idx_cn, idx_pn;
+ FILE *fout;
+
+ if (bp.env.operation_mode == NN_TRAINING)
+ {
+ // initialize weight for the neurons in the hidden layers
+ for (idx_layer = 1; idx_layer < ((bp.arch.size - 1)) ; idx_layer++)
+ {
+ for (idx_neuron = 0; idx_neuron < (bp.arch.layers[idx_layer].size); idx_neuron++)
+ {
+ for (idx_weight = 0; idx_weight < (bp.arch.layers[idx_layer - 1].size + 1); idx_weight++ )
+ {
+ prob = (rand()%1000)/1000.0 - 0.5;
+ bp.arch.layers[idx_layer].neurons[idx_neuron].w[idx_weight] = prob; // (-1,1)
+ bp.arch.layers[idx_layer].neurons[idx_neuron].delta_w[idx_weight] = 0.0;
+ bp.arch.layers[idx_layer].neurons[idx_neuron].temp_delta_w[idx_weight] = 0.0;
+ }
+ }
+ }
+
+ // initialize weight for the neurons in the output layer
+ for (idx_neuron = 0; idx_neuron < (bp.arch.layers[bp.arch.size - 1].size); idx_neuron++)
+ {
+ for (idx_weight = 0; idx_weight < (bp.arch.layers[bp.arch.size - 2].size + 1); idx_weight++ )
+ {
+ prob = (rand()%1000)/1000.0 - 0.5;
+ bp.arch.layers[bp.arch.size - 1].neurons[idx_neuron].w[idx_weight] = prob; // (-1,1)
+ bp.arch.layers[bp.arch.size - 1].neurons[idx_neuron].delta_w[idx_weight] = 0.0;
+ bp.arch.layers[bp.arch.size - 1].neurons[idx_neuron].temp_delta_w[idx_weight] = 0.0;
+ }
+ }
+ //store_bp_results();
+ }
+ else
+ { // RECALL operation mode
+ // read in weights from file
+ // open file for write
+ if ((fout=fopen("BP_res.txt","r"))==NULL)
+ {
+ printf("Cant open file for write BP training results");
+ exit(1);
+ }
+ for (idx_layer = 1; idx_layer < (bp.arch.size) ; idx_layer++ )
+ { // loop through the layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop through the neurons in the current layer
+ for (idx_pn = 0; idx_pn <= (bp.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ fscanf(fout,"%f",&(bp.arch.layers[idx_layer].neurons[idx_cn].w[idx_pn]));
+ }
+ }
+ }
+ fclose(fout);
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void store_bp_results (void)
+{
+ // store weights
+ int idx_layer, idx_cn, idx_pn;
+ FILE *fout;
+
+ // open file for write
+
+ if (bp.env.operation_mode == NN_TRAINING)
+ {
+ if ((fout=fopen("BP_res.txt","w"))==NULL)
+ {
+ printf("Cant open file for reading BP weights");
+ exit(1);
+ }
+ for (idx_layer = 1; idx_layer < (bp.arch.size) ; idx_layer++ )
+ { // loop through the layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop through the neurons in the current layer
+ for (idx_pn = 0; idx_pn <= (bp.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ fprintf(fout,"%f\n",bp.arch.layers[idx_layer].neurons[idx_cn].w[idx_pn]);
+ }
+ }
+ }
+ fprintf(fout,"\n\nmse = %f\n",bp.mse);
+ fclose(fout);
+ }
+ else // RECALL
+ {
+
+ }
+}
+
+/************************************************************/
+/* BP State Handlers */
+/************************************************************/
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_state_handler (int state_index)
+{
+ switch (state_index)
+ {
+ case BP_GET_PATTERN:
+ bp_get_pattern();
+ break;
+ case BP_FEEDFORWARD_INPUT:
+ bp_feedforward_input();
+ break;
+ case BP_FEEDFORWARD_HIDDEN:
+ bp_feedforward_hidden();
+ break;
+ case BP_FEEDFORWARD_OUTPUT:
+ bp_feedforward_output();
+ break;
+ case BP_BACK_PROPAGATION_OUTPUT:
+ bp_back_propagation_output();
+ break;
+ case BP_BACK_PROPAGATION_HIDDENS:
+ bp_back_propagation_hiddens();
+ break;
+ case BP_BATCH_TEMP_WEIGHT_STEP_CHANGE:
+ bp_batch_temp_weight_step_change();
+ break;
+ case BP_NEXT_PATTERN:
+ bp_next_pattern();
+ break;
+ case BP_WEIGHT_STEP_CHANGE:
+ bp_weight_step_change();
+ break;
+ case BP_WEIGHT_CHANGE:
+ bp_weight_change();
+ break;
+ case BP_NEXT_GENERATION:
+ bp_next_generation();
+ break;
+ case BP_UPDATE_LEARNING_RATE:
+ bp_update_learning_rate();
+ break;
+ case BP_UPDATE_MOMENTUM_RATE:
+ bp_update_momentum_rate();
+ break;
+ case BP_TRAINING_DONE:
+ bp_training_done();
+ break;
+ case BP_RECALL_DONE:
+ bp_recall_done();
+ break;
+ default:
+ break;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_get_pattern (void)
+{
+ int idx;
+
+ for (idx = 0; idx < (bp.arch.layers[0].size); idx++)
+ {
+ bp.arch.layers[0].neurons[idx].in = patset.patterns[bp.env.cur_pat][idx];
+ }
+ for (idx = 0; idx < patset.dim_out; idx++ )
+ {
+ target_out[idx] = patset.patterns[bp.env.cur_pat][patset.dim_in + idx];
+ //printf("%d: %f\n",bp.env.cur_pat, target_out[idx]);
+ }
+
+ bp_cur_state = BP_FEEDFORWARD_INPUT;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_feedforward_input(void)
+{
+ int idx;
+
+ for (idx = 0; idx < (bp.arch.layers[0].size); idx++)
+ {
+ bp.arch.layers[0].neurons[idx].out = bp.arch.layers[0].neurons[idx].in;
+ }
+
+ bp_cur_state = BP_FEEDFORWARD_HIDDEN;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_feedforward_hidden (void)
+{
+ int idx, idx_prev,idx_cur;
+ float sum;
+
+ for (idx = 1; idx < ( bp.arch.size - 1); idx++)
+ { // loop through the hidden layers
+ for (idx_cur = 0; idx_cur < (bp.arch.layers[idx].size ); idx_cur++)
+ { // loop throught the neurons of the current hidden layer
+ sum = 0.0;
+ for ( idx_prev = 0; idx_prev < (bp.arch.layers[idx - 1].size ); idx_prev++)
+ { // loop through the outputs of the previous layer
+ sum += (bp.arch.layers[idx -1].neurons[idx_prev].out ) * (bp.arch.layers[idx].neurons[idx_cur].w[idx_prev] );
+ }
+ sum += (bp.arch.layers[idx].neurons[idx_cur].w[bp.arch.layers[idx - 1].size] );
+ bp.arch.layers[idx].neurons[idx_cur].in = sum;
+ bp.arch.layers[idx].neurons[idx_cur].out = activate_function(sum,bp.arch.layers[idx].neurons[idx_cur].neuron_function);
+ }
+ }
+ bp_cur_state = BP_FEEDFORWARD_OUTPUT;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_feedforward_output (void)
+{
+ int idx_out, idx_prev;
+ float sum;
+ for (idx_out = 0; idx_out < (bp.arch.layers[bp.arch.size - 1].size ); idx_out++)
+ { // loop throught the neurons of the output layer
+ sum = 0.0;
+ for ( idx_prev = 0; idx_prev < (bp.arch.layers[bp.arch.size - 2].size ); idx_prev++)
+ { // loop through the outputs of the previous layer
+ sum += (bp.arch.layers[bp.arch.size - 2].neurons[idx_prev].out ) * (bp.arch.layers[bp.arch.size - 1].neurons[idx_out].w[idx_prev] );
+ }
+ sum += (bp.arch.layers[bp.arch.size - 1].neurons[idx_out].w[bp.arch.layers[bp.arch.size - 2].size] );
+ bp.arch.layers[bp.arch.size - 1].neurons[idx_out].in = sum;
+ bp.arch.layers[bp.arch.size - 1].neurons[idx_out].out = activate_function(sum,bp.arch.layers[bp.arch.size - 1].neurons[idx_out].neuron_function);
+ //if (bp.env.operation_mode == NN_RECALL)
+ //{
+ // printf("patten index: %d\t%f\t%f\t%f\n",bp.env.cur_pat,bp.arch.layers[0].neurons[0].in,bp.arch.layers[0].neurons[1].in,bp.arch.layers[bp.arch.size - 1].neurons[idx_out].out);
+ //}
+ }
+
+ if (bp.env.operation_mode == NN_TRAINING)
+ {
+ bp_cur_state = BP_BACK_PROPAGATION_OUTPUT;
+ }
+ else
+ { // recall
+ update_recall_result();
+ bp_cur_state = BP_NEXT_PATTERN;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_back_propagation_output (void)
+{
+ int idx;
+ double tempA,tempB;
+
+ for (idx = 0; idx < (bp.arch.layers[bp.arch.size - 1].size); idx++)
+ {
+ tempA = (target_out[idx] - bp.arch.layers[bp.arch.size - 1].neurons[idx].out);
+ switch (bp.arch.layers[bp.arch.size - 1].neurons[idx].neuron_function)
+ {
+ case NN_LINEAR_FUNCTION:
+ bp.arch.layers[bp.arch.size - 1].neurons[idx].error = tempA;
+ break;
+ case NN_GAUSIAN_FUNCTION:
+ printf("BP net can't have Gaussian Neurons, exit\n");
+ exit(1);
+ break;
+ default: // NN_SIGMOID_FUNCTION
+ tempB = (bp.arch.layers[bp.arch.size - 1].neurons[idx].out) * ( 1.0 - (bp.arch.layers[bp.arch.size - 1].neurons[idx].out));
+ bp.arch.layers[bp.arch.size - 1].neurons[idx].error = tempA * tempB;
+ break;
+ }
+ bp.mse += (tempA * tempA);
+ }
+
+ bp_cur_state = BP_BACK_PROPAGATION_HIDDENS;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_back_propagation_hiddens (void)
+{
+ int idx_l, idx_cn, idx_nn;
+ double tempA,sum;
+
+ for (idx_l = bp.arch.size - 2; idx_l > 0; idx_l--)
+ { // loop through all the hidden layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_l].size) ; idx_cn++ )
+ { // loop through all the neurons in the current hidden layer
+ sum = 0.0;
+ for (idx_nn = 0; idx_nn < (bp.arch.layers[idx_l + 1].size); idx_nn++ )
+ { // loop through the next layer's neurons
+ sum += (bp.arch.layers[idx_l + 1].neurons[idx_nn].error) * (bp.arch.layers[idx_l + 1].neurons[idx_nn].w[idx_cn]);
+ }
+ tempA = bp.arch.layers[idx_l].neurons[idx_cn].out * ( 1.0 - (bp.arch.layers[idx_l].neurons[idx_cn].out));
+ bp.arch.layers[idx_l].neurons[idx_cn].error = sum * tempA;
+ }
+
+ }
+
+ bp_cur_state = BP_BATCH_TEMP_WEIGHT_STEP_CHANGE;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_batch_temp_weight_step_change (void)
+{
+ int idx_layer,idx_cn,idx_pn;
+ double tempA;
+
+ for (idx_layer = bp.arch.size - 1; idx_layer > 0; idx_layer--)
+ { // loop through layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop neurons in the current layer
+ for (idx_pn = 0; idx_pn < (bp.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ { // loop through neurons in the previous layer
+ tempA = bp.arch.layers[idx_layer].neurons[idx_cn].error * bp.arch.layers[idx_layer - 1].neurons[idx_pn].out;
+ tempA *= bp.env.alpha;
+ bp.arch.layers[idx_layer].neurons[idx_cn].temp_delta_w[idx_pn] += tempA;
+ }
+ bp.arch.layers[idx_layer].neurons[idx_cn].temp_delta_w[bp.arch.layers[idx_layer - 1].size] += bp.env.alpha * bp.arch.layers[idx_layer].neurons[idx_cn].error;
+ }
+ }
+
+ if (bp.env.train_mode == NN_BATCH_MODE)
+ {
+ bp_cur_state = BP_NEXT_PATTERN;
+ }
+ else
+ {
+ bp_cur_state = BP_WEIGHT_STEP_CHANGE;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_next_pattern (void)
+{
+ bp.env.cur_pat++;
+ if (bp.env.operation_mode == NN_TRAINING)
+ {
+ if (bp.env.train_mode == NN_BATCH_MODE)
+ {
+ if (bp.env.cur_pat < bp.env.max_tra_pat)
+ {
+ bp_cur_state = BP_GET_PATTERN;
+ }
+ else
+ {
+ bp_cur_state = BP_WEIGHT_STEP_CHANGE;
+ }
+ }
+ else // sequential learning
+ {
+ if (bp.env.cur_pat < bp.env.max_tra_pat)
+ {
+ bp_cur_state = BP_GET_PATTERN;
+ }
+ else
+ {
+ bp_cur_state = BP_NEXT_GENERATION;
+ }
+ }
+ }
+ else // recall
+ {
+ if (bp.env.cur_pat < patset.size)
+ {
+ bp_cur_state = BP_GET_PATTERN;
+ }
+ else
+ {
+ bp_cur_state = BP_RECALL_DONE;
+ }
+ }
+
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_weight_step_change (void)
+{
+ int idx_layer, idx_cn, idx_pn;
+
+ for (idx_layer = 1; idx_layer < (bp.arch.size) ; idx_layer++ )
+ { // loop through the layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop through the neurons in the current layer
+ for (idx_pn = 0; idx_pn <= (bp.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ bp.arch.layers[idx_layer].neurons[idx_cn].delta_w[idx_pn] *= bp.env.gama;
+ bp.arch.layers[idx_layer].neurons[idx_cn].delta_w[idx_pn] += (bp.arch.layers[idx_layer].neurons[idx_cn].temp_delta_w[idx_pn]); // /(bp.env.max_tra_pat);
+ bp.arch.layers[idx_layer].neurons[idx_cn].temp_delta_w[idx_pn] = 0.0;
+ }
+ }
+ }
+ bp_cur_state = BP_WEIGHT_CHANGE;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_weight_change (void)
+{
+ int idx_layer, idx_cn, idx_pn;
+
+ for (idx_layer = 1; idx_layer < (bp.arch.size) ; idx_layer++ )
+ { // loop through the layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop through the neurons in the current layer
+ for (idx_pn = 0; idx_pn <= (bp.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ bp.arch.layers[idx_layer].neurons[idx_cn].w[idx_pn] += bp.arch.layers[idx_layer].neurons[idx_cn].delta_w[idx_pn];
+ }
+ }
+ }
+
+ if (bp.env.train_mode == NN_BATCH_MODE)
+ {
+ bp_cur_state = BP_NEXT_GENERATION;
+ }
+ else
+ {
+ bp_cur_state = BP_NEXT_PATTERN;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_next_generation (void)
+{
+ int idx_layer, idx_cn; // idx_pn;
+
+ for (idx_layer = 0; idx_layer < (bp.arch.size) ; idx_layer++ )
+ { // loop through the layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop through the neurons in the current layer
+ // clean the error
+ bp.arch.layers[idx_layer].neurons[idx_cn].error = 0.0;
+ //if (idx_layer >0)
+ //{ // clean temp. step weight weights
+ // for (idx_pn = 0; idx_pn <= (bp.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ // { // loop through the connect weights of the current neurons
+ // bp.arch.layers[idx_layer].neurons[idx_cn].temp_delta_w[idx_pn] = 0.0;
+ // }
+ //}
+ }
+ }
+
+ bp.mse /= bp.env.max_tra_pat;
+print_mse();
+
+ //bp.env.cur_gen++;
+
+ if ((++bp.env.cur_gen) < bp.env.max_gen) // add error criterion later
+ {
+ bp.mse = 0.0; //clear mean squared error
+ bp_cur_state = BP_UPDATE_LEARNING_RATE;
+ }
+ else
+ {
+ bp_cur_state = BP_TRAINING_DONE;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_update_learning_rate (void)
+{
+ bp_cur_state = BP_UPDATE_MOMENTUM_RATE;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_update_momentum_rate (void)
+{
+ bp.env.cur_pat = 0;
+ bp_cur_state = BP_GET_PATTERN;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_training_done (void)
+{
+
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_recall_done (void)
+{
+
+}
+
+/************************************************************/
+/* neuron activation functions */
+/************************************************************/
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float activate_function(float sum, int index)
+{
+  float res = 0.0f; /* default so an unknown index cannot return an uninitialized value */
+ switch (index)
+ {
+ case NN_LINEAR_FUNCTION:
+ res = nn_linear_function(sum);
+ break;
+ case NN_GAUSIAN_FUNCTION:
+ res = nn_gausian_function(sum);
+ break;
+ case NN_SIGMOID_FUNCTION:
+ res = nn_sigmoid_function(sum);
+ break;
+ default:
+ break;
+ }
+ return (res);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float nn_linear_function (float sum)
+{
+ float res;
+
+ res = sum;
+ return (res);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float nn_gausian_function (float sum)
+{
+ printf("no Gaussian function implemented\n");
+ exit(1);
+ return (sum);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float nn_sigmoid_function (float sum)
+{
+ float res;
+ res = (float)sigmoid(sum);
+ return (res);
+}
diff --git a/pso_nn/bp.h b/pso_nn/bp.h
new file mode 100644
index 0000000..af89edd
--- /dev/null
+++ b/pso_nn/bp.h
@@ -0,0 +1,118 @@
+#ifndef BP_NN_H
+#define BP_NN_H
+
+#include "nnet.h"
+
+/**************************************************************/
+/* Constants and Macros */
+/**************************************************************/
+
+/**************************************************************/
+/* Enumerations */
+/**************************************************************/
+typedef enum BP_Training_Mode_Tag
+{
+ NN_BATCH_MODE,
+ NN_SEQUENTIAL_MODE,
+ NUM_NN_TRAINING_MODES
+} BP_Training_Mode_Type;
+
+typedef enum BP_State_Tag
+{
+ BP_GET_PATTERN,
+ BP_FEEDFORWARD_INPUT,
+ BP_FEEDFORWARD_HIDDEN,
+ BP_FEEDFORWARD_OUTPUT,
+ BP_BACK_PROPAGATION_OUTPUT,
+ BP_BACK_PROPAGATION_HIDDENS,
+ BP_BATCH_TEMP_WEIGHT_STEP_CHANGE,
+ BP_NEXT_PATTERN,
+ BP_WEIGHT_STEP_CHANGE,
+ BP_WEIGHT_CHANGE,
+ BP_NEXT_GENERATION,
+ BP_UPDATE_LEARNING_RATE,
+ BP_UPDATE_MOMENTUM_RATE,
+ BP_TRAINING_DONE,
+ BP_RECALL_DONE,
+ NUM_BP_STATES
+} BP_State_Type;
+
+
+/**************************************************************/
+/* Structures */
+/**************************************************************/
+typedef struct Neuron_Type_Tag
+{
+ NN_Function_Type neuron_function; // neuron function
+ float in; // neuron input
+ float out; // neuron output
+ double error; // error of neuron's output
+ FVECTOR delta_w; // step change of weights
+ FVECTOR temp_delta_w; // temp. step change of weights
+ FVECTOR w; // connection weights from the previous layers
+} Neuron_Type;
+
+typedef struct NN_Layer_Arch_Type_Tag
+{
+ int size; // number of neurons in the layer
+ Neuron_Type *neurons; // pointer to the array of the neurons
+ NN_Layer_Type layer_type;
+} NN_Layer_Arch_Type;
+
+typedef struct BP_Arch_Type_Tag
+{
+ int size; // number of layers
+ NN_Layer_Arch_Type *layers; // pointer to the layers
+ int *hidden_number; // pointer to the neuron numbers of hidden layers
+} BP_Arch_Type;
+
+typedef struct BP_Env_Type_Tag
+{
+ NN_Operation_Mode_Type operation_mode; // training or recall
+ BP_Training_Mode_Type train_mode; // training mode if in training operation mode
+ float alpha; // learning rate 0.075
+ float gama; // momentum rate 0.15
+ float criterion; // training error criterion for termination
+ int max_gen; // maximum number of generations
+ int cur_gen; // current generation index
+ int max_tra_pat; // total number of training patterns
+ int cur_pat; // current training pattern index
+
+} BP_Env_Type;
+
+typedef struct BP_Type_Tag
+{
+ BP_Arch_Type arch;
+ BP_Env_Type env;
+ double mse; // mean squared error
+} BP_Type;
+
+typedef struct BP_Pattern_Set_Type_Tag
+{
+ int size; // number of patterns
+ int dim_in; // input dimension
+ int dim_out; // output dimension
+ FMATRIX patterns; // pointer to the array of in/out patterns
+} BP_Pattern_Set_Type;
+
+/**************************************************************/
+/* Function Prototypes for functions with file level scope */
+/**************************************************************/
+
+
+/**************************************************************/
+/* Const Variable with global level scope */
+/**************************************************************/
+
+
+/**************************************************************/
+/* Function Prototypes for functions with global level scope */
+/**************************************************************/
+
+extern float BP_Main_Loop(void);
+extern void BP_Start_Up(char *dataFile);
+extern void BP_Clean_Up(void);
+
+extern void BP_Weights_From_PSO(float *vec);
+extern int BP_Get_PSO_Dimension(void);
+#endif
diff --git a/pso_nn/definiti.h b/pso_nn/definiti.h
new file mode 100644
index 0000000..e35e944
--- /dev/null
+++ b/pso_nn/definiti.h
@@ -0,0 +1,14 @@
+#ifndef DEFINITION_H
+#define DEFINITION_H
+
+typedef enum BOOLEAN_Tag {FALSE, TRUE} BOOLEAN;
+
+typedef int *P_INT;
+typedef P_INT IVECTOR;
+typedef P_INT *IMATRIX;
+
+typedef float *P_FLOAT;
+typedef P_FLOAT FVECTOR;
+typedef P_FLOAT *FMATRIX;
+
+#endif
diff --git a/pso_nn/headfile.h b/pso_nn/headfile.h
new file mode 100644
index 0000000..cca8e6e
--- /dev/null
+++ b/pso_nn/headfile.h
@@ -0,0 +1,10 @@
+#ifndef __HEADFILE_H__
+#define __HEADFILE_H__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <string.h>
+#include <time.h> //YS 01/16/98
+
+#endif
diff --git a/pso_nn/main.c b/pso_nn/main.c
new file mode 100644
index 0000000..c1cc76c
--- /dev/null
+++ b/pso_nn/main.c
@@ -0,0 +1,43 @@
+#include "headfile.h"
+#include "psostate.h"
+#include "bp.h"
+#include "mem_loc.h"
+
+/**************************************/
+/* Constants and Macros */
+/**************************************/
+#define NUM_RUN 50
+
+static void main_start_up(char *psoDataFile, char *bpDataFile);
+static void main_clean_up(void);
+
+// Listing 6.14 The main() routine of evolutionary BP net
+int main (int argc, char *argv[])
+{
+ // check command line
+ if (argc != 3)
+ {
+ printf("Usage: exe_file pso_run_file bp_run_file\n");
+ exit(1);
+ }
+
+ // initialize
+ main_start_up(argv[1],argv[2]);
+
+ PSO_Main_Loop();
+
+ // clean up memory space
+ main_clean_up();
+}
+
+static void main_start_up (char *psoDataFile,char *bpDataFile)
+{
+ BP_Start_Up(bpDataFile);
+ PSO_Start_Up(psoDataFile);
+}
+
+static void main_clean_up (void)
+{
+ PSO_Clean_Up();
+ BP_Clean_Up();
+}
diff --git a/pso_nn/mem_loc.c b/pso_nn/mem_loc.c
new file mode 100644
index 0000000..81d35c4
--- /dev/null
+++ b/pso_nn/mem_loc.c
@@ -0,0 +1,98 @@
+#include "headfile.h"
+#include "mem_loc.h"
+
+/**********************************************************************
+ If you want to allocate a block larger than 64K, you must use
+ farcalloc instead of calloc
+**********************************************************************/
+
+/* Memory allocation functions for integer matrix and vector */
+
+void IVectorAllocate(IVECTOR *ivector, int nCols)
+{
+  if ((*ivector = (IVECTOR) calloc(nCols, sizeof(int))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for vector\n");
+ exit(1);
+ }
+}
+
+/* Allocate space for columns (int cells) for
+ dynamic two dimensional matrix[rows][cols]
+*/
+
+void IAllocateCols(P_INT imatrix[], int nRows, int nCols)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ IVectorAllocate(&imatrix[i], nCols);
+}
+
+/* Allocate space for a two dimensional dynamic matrix [rows] [cols]
+*/
+
+void IMatrixAllocate(IMATRIX *ipmatrix, int nRows, int nCols)
+{
+  if ( (*ipmatrix = (IMATRIX) calloc(nRows, sizeof(P_INT) ) ) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for matrix\n");
+ exit(1);
+ }
+
+ IAllocateCols(*ipmatrix, nRows, nCols);
+}
+
+/* free space for two dimensional dynamic array */
+void IMatrixFree(IMATRIX imatrix, int nRows)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ free(imatrix[i]);
+ free(imatrix);
+}
+
+/* *************** Float routines *************** */
+
+void FVectorAllocate(FVECTOR *fvector, int nCols)
+{
+ if ((*fvector = (FVECTOR) calloc(nCols, sizeof(float))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for fvector\n");
+ exit(1);
+ }
+}
+
+/* Allocate space for columns (int cells) for
+ dynamic two dimensional matrix[rows][cols]
+*/
+
+void FAllocateCols(P_FLOAT fmatrix[], int nRows, int nCols)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ FVectorAllocate(&fmatrix[i], nCols);
+}
+
+/* Allocate space for a two dimensional dynamic matrix [rows] [cols]
+*/
+
+void FMatrixAllocate(FMATRIX *fpmatrix, int nRows, int nCols)
+{
+  if ( (*fpmatrix = (FMATRIX) calloc(nRows, sizeof(P_FLOAT))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for fmatrix\n");
+ exit(1);
+ }
+
+ FAllocateCols(*fpmatrix, nRows, nCols);
+}
+
+/* free space for two dimensional dynamic array */
+void FMatrixFree(FMATRIX fmatrix, int nRows)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ free(fmatrix[i]);
+ free(fmatrix);
+}
+
diff --git a/pso_nn/mem_loc.h b/pso_nn/mem_loc.h
new file mode 100644
index 0000000..e79b2cd
--- /dev/null
+++ b/pso_nn/mem_loc.h
@@ -0,0 +1,17 @@
+#ifndef __MEM_LOC_H__
+#define __MEM_LOC_H__
+
+#include "definiti.h"
+
+extern void IVectorAllocate(IVECTOR *ivector, int nCols);
+extern void IMatrixFree(IMATRIX imatrix, int nRows);
+extern void IMatrixAllocate(IMATRIX *ipmatrix, int nRows, int nCols);
+extern void IAllocateCols(P_INT imatrix[], int nRows, int nCols);
+
+extern void FVectorAllocate(FVECTOR *fvector, int nCols);
+extern void FMatrixFree(FMATRIX fmatrix, int nRows);
+extern void FMatrixAllocate(FMATRIX *fpmatrix, int nRows, int nCols);
+extern void FAllocateCols(P_FLOAT fmatrix[], int nRows, int nCols);
+
+#endif
+
diff --git a/pso_nn/nnet.h b/pso_nn/nnet.h
new file mode 100644
index 0000000..343ff1e
--- /dev/null
+++ b/pso_nn/nnet.h
@@ -0,0 +1,32 @@
+#ifndef NEURAL_NET_H
+#define NEURAL_NET_H
+
+#include "definiti.h"
+
+/**************************************************************/
+/* Enumerations */
+/**************************************************************/
+typedef enum NN_Operation_Mode_Type_Tag
+{
+ NN_TRAINING,
+ NN_RECALL,
+ NUM_BP_OPERATION_MODES
+} NN_Operation_Mode_Type;
+
+typedef enum NN_Function_Type_Tag
+{
+ NN_LINEAR_FUNCTION,
+ NN_GAUSIAN_FUNCTION,
+ NN_SIGMOID_FUNCTION,
+ NUM_NN_FUNCTION_TYPES
+} NN_Function_Type;
+
+typedef enum NN_Layer_Type_Tag
+{
+ NN_INPUT_LAYER,
+ NN_HIDDEN_LAYER,
+ NN_OUTPUT_LAYER,
+ NUM_NN_LAYERS
+} NN_Layer_Type;
+
+#endif
diff --git a/pso_nn/psostate.c b/pso_nn/psostate.c
new file mode 100644
index 0000000..a2c16fd
--- /dev/null
+++ b/pso_nn/psostate.c
@@ -0,0 +1,2509 @@
+#include "headfile.h"
+#include "definiti.h"
+#include "psostate.h"
+#include "bp.h"
+#include "mem_loc.h"
+
+
+/**************************************************************/
+/* Static Variable and Const Variable with File level scope */
+/**************************************************************/
+static int NUM_PSO; // num of PSOs needs to be specified
+static int PSO_UPDATE_PBEST_EACH_CYCLE_FLAG; // needs to be specified, TRUE when PSOs serve as environment to the other PSOs
+
+static BOOLEAN pso_update_pbest_each_cycle_pending; //
+static PSO_State_Type PSO_current_state; // current state of the current PSO
+static int cur_pso; // current index of PSOs
+static int total_cycle_of_PSOs; // total cycles of running PSOs
+static int pso_cycle_index = 0; // index of cycles
+
+static PSO_Type *psos; // pointer to the array of PSOs
+
+static double paper_results[50]; // paper use only
+static int counter; // paper use only
+
+/**************************************************************/
+/* Function Prototypes for functions with file level scope */
+/**************************************************************/
+
+static void PSO_update_inertia_weight(void); // state handlers
+static void PSO_evaluate(void);
+static void PSO_update_global_best(void);
+static void PSO_update_local_best(void);
+static void PSO_update_velocity(void);
+static void PSO_update_position(void);
+static void PSO_goal_reach_judge(void);
+static void PSO_next_generation(void);
+static void PSO_update_pbest_each_cycle(void);
+static void PSO_next_pso(void);
+static void PSOs_done(void);
+
+static void pso_initialize(void); // initialization
+static void PSO_random_symmetry_initialize(void);
+static void PSO_random_asymmetry_initialize(void);
+
+static void simple_min(void); // evaluation functions
+static void simple_max(void);
+static void michalewicz_min(void);
+static void michalewicz_max(void);
+static void michalewicz_2_min(void);
+static void michalewicz_2_max(void);
+static void g1_min(void);
+static void g1_max(void);
+static void g7_min(void);
+static void g7_max(void);
+static void g9_min(void);
+static void g9_max(void);
+static void f6(void);
+static void sphere(void);
+static void rosenbrock(void);
+static void rastrigrin(void);
+static void griewank(void);
+
+static void constant_iw(void); // update inertia weight methods
+static void linear_iw(void);
+static void fuzzy_two_iw(void);
+static void fuzzy_four_iw(void);
+static void noise_addition_iw(void);
+
+static void read_pso_parameters(char *dataFile); // read PSO parameters from I/O file
+static void allocate_pso_memory(void); // allocate PSOs memory spaces
+static void free_pso_memory(void); // free PSOs memory spaces
+static void pso_store_results(void); // write PSO results to I/O file
+//static void pso_store_paper_results(void);
+
+static void pso_state_handler(int); // PSO state handle routine
+static void PSO_initialize_handler(int); // PSO initialization
+static void evaluate_functions(int); // PSO evaluation functions
+static void iw_update_methods(int); // PSO update inertia weight methods
+
+static void best_solution_values(int); // function value and constrant values for best solution at the current generation
+static void g1_best_solution(void); // for G1
+static void g7_best_solution(void); // G7
+static void g9_best_solution(void); // G9
+
+/**************************************************************/
+/* Function Definitions */
+/**************************************************************/
+
+/**************************************************************/
+/* PSO Start and clean routines and interfaces */
+/**************************************************************/
+/**************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+***************************************************************/
+void PSO_Start_Up (char *dataFile)
+{
+ read_pso_parameters(dataFile);
+ allocate_pso_memory(); // allocate memory for particles
+}
+
+/*************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+void PSO_Clean_Up (void)
+{
+ free_pso_memory(); // free memory space of particles
+}
+
+/************************************************************/
+/* PSO functions                                           */
+/************************************************************/
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+void PSO_Main_Loop (void)
+{
+ BOOLEAN running;
+
+ pso_initialize();
+ // start running PSOs
+ while ((pso_cycle_index++) < total_cycle_of_PSOs)
+ {
+ running = TRUE;
+ while (running)
+ {
+ if (PSO_current_state == PSOS_DONE)
+ {
+ running = FALSE; // end running this cycle of PSO
+ }
+ pso_state_handler(PSO_current_state); // run state handler
+ }
+ }
+ pso_store_results(); // output results
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void read_pso_parameters (char *dataFile)
+{
+ int idx_i, idx_j;
+ FILE *frun;
+
+ // open the runfile to input parameters
+ if ((frun=fopen(dataFile,"r"))==NULL)
+ {
+ printf("Cant read run file");
+ exit(1);
+ }
+
+ fscanf(frun, "%d",&NUM_PSO); // total number of PSOs
+ // allocate memory for array of the PSOs
+ psos = malloc(NUM_PSO * sizeof(PSO_Type));
+ if (psos == NULL)
+ {
+ printf("Allocating memory for PSOs failed -- aborting\n");
+ exit(1);
+ }
+
+ fscanf(frun, "%d",&PSO_UPDATE_PBEST_EACH_CYCLE_FLAG); // whether to update pbest before moving to run another pso
+ fscanf(frun, "%d",&total_cycle_of_PSOs); // total cycles of running PSOs
+
+ // Read PSOs' parameters from runfile
+ for (idx_i=0; idx_i<NUM_PSO;idx_i++ )
+ {
+ //read environment data
+ fscanf(frun, "%d",&(psos[idx_i].env_data.opti_type )); // optimization type: min or max
+    fscanf(frun, "%d",&(psos[idx_i].env_data.function_type )); // evaluation function
+ fscanf(frun, "%d",&(psos[idx_i].env_data.iw_method )); // inertia weight update method
+ fscanf(frun, "%d",&(psos[idx_i].env_data.init_type)); // initialization type: sym/asym
+ fscanf(frun, "%f",&(psos[idx_i].env_data.init_range.left )); // left initialization range
+ fscanf(frun, "%f",&(psos[idx_i].env_data.init_range.right ));// right initialization range
+ fscanf(frun, "%f",&(psos[idx_i].env_data.max_velocity )); // maximum velocity
+ fscanf(frun, "%f",&(psos[idx_i].env_data.max_position )); // maximum position
+ fscanf(frun, "%d",&(psos[idx_i].env_data.max_generation )); // max number of generations
+
+ //read PSO data
+ fscanf(frun, "%d",&(psos[idx_i].popu_size )); // population size
+ //fscanf(frun, "%d",&(psos[idx_i].dimension )); // dimension
+ fscanf(frun, "%f",&(psos[idx_i].init_inertia_weight )); // initial inertia weight
+
+ // read boundary flag
+ fscanf(frun, "%d",&(psos[idx_i].env_data.boundary_flag ));
+ if (psos[idx_i].env_data.boundary_flag)
+ {
+ // allocate memory for boundaries
+ FVectorAllocate(&(psos[idx_i].env_data.low_boundaries), psos[idx_i].dimension);
+ FVectorAllocate(&(psos[idx_i].env_data.up_boundaries), psos[idx_i].dimension);
+
+ //read boundaries
+ for (idx_j = 0 ; idx_j < psos[idx_i].dimension ; idx_j++)
+ {
+ fscanf(frun, "%f",&(psos[idx_i].env_data.low_boundaries[idx_j]));
+ fscanf(frun, "%f",&(psos[idx_i].env_data.up_boundaries[idx_j]));
+ }
+ }
+ psos[idx_i].inertia_weight = psos[idx_i].init_inertia_weight;
+ }
+ // close runfile
+ fclose(frun);
+
+ psos[0].dimension = BP_Get_PSO_Dimension();
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void allocate_pso_memory (void)
+{
+ int idx_i;
+
+ for (idx_i =0 ; idx_i<NUM_PSO ;idx_i++ )
+ {
+ FVectorAllocate(&(psos[idx_i].pbest_values), psos[idx_i].popu_size);
+ FMatrixAllocate(&(psos[idx_i].velocity_values), psos[idx_i].popu_size, psos[idx_i].dimension);
+ FMatrixAllocate(&(psos[idx_i].position_values), psos[idx_i].popu_size, psos[idx_i].dimension);
+ FMatrixAllocate(&(psos[idx_i].pbest_position_values), psos[idx_i].popu_size, psos[idx_i].dimension);
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+//static void pso_store_results (void)
+//{
+// int idx_i, idx_j, idx_k;
+// FILE *fout;
+//
+// // open file for write
+// if ((fout=fopen("result","w"))==NULL)
+// {
+// printf("Cant open file for write");
+// exit(1);
+// }
+//
+// // output the best position values
+// fprintf(fout,"the position value of the best individual\n");
+// for (idx_j =0;idx_j < NUM_PSO ; idx_j++)
+// {
+// fprintf(fout,"PSO Number %d :\n",idx_j);
+// for (idx_i=0;idx_i< psos[idx_j].dimension; idx_i++)
+// {
+// fprintf(fout,"%d: %f\n",idx_i,psos[idx_j].pbest_position_values[psos[idx_j].global_best_index][idx_i]);
+// }
+// fprintf(fout,"\n\n");
+// }
+//
+// // output fitness values
+// fprintf(fout,"fitness values in pbest\n");
+//
+// for (idx_j =0;idx_j < NUM_PSO ; idx_j++)
+// {
+// fprintf(fout,"PSO Number %d :\n",idx_j);
+// for (idx_i=0;idx_i< psos[idx_j].popu_size; idx_i++)
+// {
+// fprintf(fout,"%d: %f\n",idx_i,psos[idx_j].pbest_values[idx_i]);
+// }
+// fprintf(fout,"%dth is the best fitness %f\n",psos[idx_j].global_best_index,psos[idx_j].pbest_values[psos[idx_j].global_best_index]);
+// }
+//
+// // output position values
+// for (idx_j =0;idx_j < NUM_PSO ; idx_j++)
+// {
+// fprintf(fout,"PSO Number %d :\n",idx_j);
+//
+// for (idx_i=0;idx_i< psos[idx_j].popu_size; idx_i++)
+// {
+// for (idx_k = 0; idx_k < psos[idx_j].dimension; idx_k++ )
+// {
+// fprintf(fout,"%d:%d %f\n",idx_i,idx_k,psos[idx_j].pbest_position_values[idx_i][idx_k]);
+// }
+// }
+// }
+// fclose(fout);
+//}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description: for paper use only
+*
+*************************************************************/
+static void pso_store_results (void)
+{
+ int idx_i, idx_j;
+ FILE *fout;
+
+ // open file for write
+ if ((fout=fopen("bp_res.txt","w"))==NULL)
+ {
+ printf("Cant open file for write PSO results");
+ exit(1);
+ }
+
+ // output the best position values
+ //fprintf(fout,"the position value of the best individual\n");
+ for (idx_j =0;idx_j < NUM_PSO ; idx_j++)
+ {
+ //fprintf(fout,"PSO Number %d :\n",idx_j);
+ for (idx_i=0;idx_i< psos[idx_j].dimension; idx_i++)
+ {
+ fprintf(fout,"%f\n",psos[idx_j].pbest_position_values[psos[idx_j].global_best_index][idx_i]);
+ }
+ fprintf(fout,"\n\n");
+ }
+
+ // calculate the best fitness and constraint values
+ //best_solution_values(psos[0].env_data.function_type);
+
+// fprintf(fout,"the minimum value = %f\n",paper_results[counter - 1]);
+ //fprintf(fout,"%f\n",paper_results[counter - 1]);
+
+/* fprintf(fout,"the constraint values\n");
+ for (idx_i = 0; idx_i < (counter - 1) ; idx_i++)
+ {
+ fprintf(fout,"%f\n",paper_results[idx_i]);
+ }
+*/
+ //fprintf(fout,"the velocity values\n");
+ //for (idx_i=0;idx_i< psos[0].dimension; idx_i++)
+ // {
+ // fprintf(fout,"%d: %f\n",idx_i,psos[0].velocity_values[psos[0].global_best_index][idx_i ]);
+ // }
+ fclose(fout);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void free_pso_memory (void)
+{
+ int idx_i;
+ for (idx_i =0 ; idx_i<NUM_PSO ;idx_i++ )
+ {
+ free(psos[idx_i].pbest_values);
+ FMatrixFree((psos[idx_i].velocity_values), psos[idx_i].popu_size );
+ FMatrixFree((psos[idx_i].position_values), psos[idx_i].popu_size );
+ FMatrixFree((psos[idx_i].pbest_position_values), psos[idx_i].popu_size );
+ if (psos[idx_i].env_data.boundary_flag)
+ {
+ free(psos[idx_i].env_data.low_boundaries );
+ free(psos[idx_i].env_data.up_boundaries );
+ }
+ }
+ free(psos);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void pso_initialize (void)
+{
+ int idx_i,idx_j;
+ int stime;
+ long ltime;
+
+ // get the current calendar time
+ ltime = time(NULL);
+ stime = (unsigned) ltime/2;
+ srand(stime);
+
+ for (idx_i = 0; idx_i <NUM_PSO ; idx_i++)
+ {
+ cur_pso = idx_i;
+ for (idx_j = 0; idx_j < (psos[cur_pso].popu_size) ; idx_j++ )
+ {
+ psos[cur_pso].popu_index = idx_j;
+
+ PSO_initialize_handler(psos[cur_pso].env_data.init_type);
+ }
+ }
+
+ cur_pso = 0; // done with initialization, move to the first PSO
+ PSO_current_state = PSO_EVALUATE; // move to the EVALUATE state
+  psos[cur_pso].popu_index = 0;            // start with the first particle
+ psos[cur_pso].gene_index = 0; // start from the first generation of the first population
+ pso_cycle_index = 0; // start from the first cycle
+}
+
+/*************************************************/
+/* PSO State Handlers */
+/*************************************************/
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void pso_state_handler (int state_index)
+{
+ switch (state_index)
+ {
+ case PSO_UPDATE_INERTIA_WEIGHT:
+ PSO_update_inertia_weight();
+ break;
+ case PSO_EVALUATE:
+ PSO_evaluate();
+ break;
+ case PSO_UPDATE_GLOBAL_BEST:
+ PSO_update_global_best();
+ break;
+ case PSO_UPDATE_LOCAL_BEST:
+ PSO_update_local_best();
+ break;
+ case PSO_UPDTAE_VELOCITY:
+ PSO_update_velocity();
+ break;
+ case PSO_UPDATE_POSITION:
+ PSO_update_position();
+ break;
+ case PSO_GOAL_REACH_JUDGE:
+ PSO_goal_reach_judge();
+ break;
+ case PSO_NEXT_GENERATION:
+ PSO_next_generation();
+ break;
+ case PSO_UPDATE_PBEST_EACH_CYCLE:
+ PSO_update_pbest_each_cycle();
+ break;
+ case PSO_NEXT_PSO:
+ PSO_next_pso();
+ break;
+ case PSOS_DONE:
+ PSOs_done();
+ break;
+ default:
+ break;
+ }
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void PSO_update_inertia_weight (void)
+{
+ iw_update_methods(psos[cur_pso].env_data.iw_method);
+ PSO_current_state = PSO_EVALUATE; // move to the next state
+  psos[cur_pso].popu_index = 0;          // start with the first particle
+
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void PSO_evaluate (void)
+{
+ if ((psos[cur_pso].popu_index) < (psos[cur_pso].popu_size))
+ {
+ evaluate_functions(psos[cur_pso].env_data.function_type);
+ PSO_current_state = PSO_UPDATE_LOCAL_BEST;
+ }
+ else // done with evaluation, move to the next state
+ {
+ PSO_current_state = PSO_GOAL_REACH_JUDGE ;
+ psos[cur_pso].popu_index = 0;
+ }
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void PSO_update_local_best (void)
+{
+ int idx_i;
+
+ // here update local best
+ if ( (psos[cur_pso].env_data.opti_type) == MINIMIZATION)
+ { // minimization problem
+ if ( (pso_cycle_index == 1) && ((psos[cur_pso].gene_index) == 0 ) )
+ {
+ psos[cur_pso].global_best_index = 0;
+ psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value;
+ }
+ if ((psos[cur_pso].eva_fun_value) < (psos[cur_pso].pbest_values[psos[cur_pso].popu_index] ))
+ {
+ psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value;
+ for (idx_i = 0; idx_i < (psos[cur_pso].dimension) ;idx_i++ )
+ {
+ (psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index ][idx_i ] ) = (psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] );
+ }
+ }
+ }
+ else
+ { // maximization problem
+ if ( (pso_cycle_index == 1) && ((psos[cur_pso].gene_index) == 0 ) )
+ {
+ psos[cur_pso].global_best_index = 0;
+ psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value;
+ }
+ if ((psos[cur_pso].eva_fun_value) > (psos[cur_pso].pbest_values[psos[cur_pso].popu_index] ))
+ {
+ psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value;
+ for (idx_i = 0; idx_i < (psos[cur_pso].dimension) ;idx_i++ )
+ {
+ (psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index ][idx_i ] ) = (psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] );
+ }
+ }
+ }
+ PSO_current_state = PSO_UPDATE_GLOBAL_BEST ;
+}
+
+/*************************************************
+* Function: PSO_update_global_best
+*
+* Parameters: none (reads/writes globals psos[], cur_pso, PSO_current_state)
+*
+* Returns: void
+*
+* Description: Compares the current particle's evaluation value with the
+*   pbest of the swarm's current global-best particle. If it is better
+*   (smaller for MINIMIZATION, larger otherwise) the current particle
+*   index becomes the new global best. Always advances the state machine
+*   to the velocity-update state.
+*
+**************************************************/
+static void PSO_update_global_best (void)
+{
+  // here update global best
+  if ( (psos[cur_pso].env_data.opti_type) == MINIMIZATION)
+  { // minimization problem
+    if ((psos[cur_pso].eva_fun_value) < (psos[cur_pso].pbest_values[psos[cur_pso].global_best_index ] ))
+    {
+      psos[cur_pso].global_best_index = psos[cur_pso].popu_index;
+    }
+  }
+  else
+  { // maximization problem
+    if ((psos[cur_pso].eva_fun_value) > (psos[cur_pso].pbest_values[psos[cur_pso].global_best_index ] ))
+    {
+      psos[cur_pso].global_best_index = psos[cur_pso].popu_index;
+    }
+  }
+
+  // NOTE(review): "UPDTAE" looks like a typo for "UPDATE"; the enum constant is
+  // declared elsewhere in the file -- rename at the declaration and here together.
+  PSO_current_state = PSO_UPDTAE_VELOCITY;
+}
+
+/*************************************************
+* Function: PSO_update_velocity
+*
+* Parameters: none (reads/writes globals psos[], cur_pso, PSO_current_state)
+*
+* Returns: void
+*
+* Description: Standard gbest PSO velocity update for every dimension of
+*   the current particle:
+*     v = w*v + c1*rand()*(pbest - x) + c2*rand()*(gbest - x)
+*   with c1 = c2 = 2 and rand() approximated by (rand()%1000)/1000.0,
+*   i.e. uniform in [0, 0.999]. The new velocity is then clamped: when
+*   boundary_flag is set, to half the search-range width per dimension;
+*   otherwise, to +/- max_velocity. Finally moves the state machine to
+*   PSO_UPDATE_POSITION.
+*
+**************************************************/
+static void PSO_update_velocity (void)
+{
+  int idx_i;
+  // here update velocity
+  for (idx_i = 0; idx_i < (psos[cur_pso].dimension) ;idx_i++ )
+  {
+    psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] =
+      (psos[cur_pso].inertia_weight) * (psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] )
+      + 2*((rand()%1000)/1000.0) * (psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index ][idx_i ] - psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] )
+      + 2*((rand()%1000)/1000.0) * (psos[cur_pso].pbest_position_values[psos[cur_pso].global_best_index ][idx_i ] - psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] );
+
+    if (psos[cur_pso].env_data.boundary_flag)
+    {
+      // clamp |v| to half of the (up - low) range of this dimension
+      if ( fabs(psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] ) > (0.5*((psos[cur_pso].env_data.up_boundaries[idx_i]) - (psos[cur_pso].env_data.low_boundaries[idx_i] ))))
+      {
+        if ((psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] ) >= 0)
+        {
+          psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] = 0.5*((psos[cur_pso].env_data.up_boundaries[idx_i]) - (psos[cur_pso].env_data.low_boundaries[idx_i] ));
+        }
+        else
+        {
+          psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] = -0.5*((psos[cur_pso].env_data.up_boundaries[idx_i]) - (psos[cur_pso].env_data.low_boundaries[idx_i] ));
+        }
+      }
+    }
+    else
+    {
+      // clamp v to the configured symmetric +/- max_velocity
+      if ( (psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] ) > (psos[cur_pso].env_data.max_velocity ) )
+      {
+        psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] = psos[cur_pso].env_data.max_velocity;
+      }
+      else if ( (psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] ) < (-(psos[cur_pso].env_data.max_velocity ) ) )
+      {
+        psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] = -(psos[cur_pso].env_data.max_velocity );
+      }
+    }
+  }
+
+  PSO_current_state = PSO_UPDATE_POSITION; // go to the PSO_UPDATE_POSITION state
+
+}
+
+/*************************************************
+* Function: PSO_update_position
+*
+* Parameters: none (reads/writes globals psos[], cur_pso, PSO_current_state)
+*
+* Returns: void
+*
+* Description: Adds the current velocity to each dimension of the current
+*   particle's position. When boundary_flag is set, positions that leave
+*   [low, up] are re-injected just inside the violated boundary plus a
+*   small random perturbation (up to 1% of the range) so particles do not
+*   stick exactly on the wall. Then returns the state machine to
+*   PSO_EVALUATE and moves on to the next particle.
+*
+**************************************************/
+static void PSO_update_position (void)
+{
+  int idx_i;
+  for (idx_i = 0; idx_i < (psos[cur_pso].dimension) ;idx_i++ )
+  {
+    psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] += psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ];
+    if (psos[cur_pso].env_data.boundary_flag)
+    {
+      if ((psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ]) < (psos[cur_pso].env_data.low_boundaries[idx_i] ))
+      {
+        psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] = psos[cur_pso].env_data.low_boundaries[idx_i] + ((psos[cur_pso].env_data.up_boundaries[idx_i] - psos[cur_pso].env_data.low_boundaries[idx_i] ) * ((rand()%1000)/100000.0)); // low boundary + noise
+      }
+      else if ((psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ]) > (psos[cur_pso].env_data.up_boundaries[idx_i]))
+      {
+        psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] = psos[cur_pso].env_data.up_boundaries[idx_i] - ((psos[cur_pso].env_data.up_boundaries[idx_i] - psos[cur_pso].env_data.low_boundaries[idx_i] ) * ((rand()%1000)/100000.0)); // up boundary - noise
+      }
+    }
+  }
+
+  PSO_current_state = PSO_EVALUATE; // go back to the PSO_EVALUATE state
+  (psos[cur_pso].popu_index)++;
+}
+
+/*************************************************
+* Function: PSO_goal_reach_judge
+*
+* Parameters: none (writes global PSO_current_state)
+*
+* Returns: void
+*
+* Description: Placeholder goal test. No termination criterion is checked
+*   here; the state machine is advanced unconditionally to
+*   PSO_NEXT_GENERATION. A real convergence / goal-reached test would go
+*   in this state handler.
+*
+**************************************************/
+static void PSO_goal_reach_judge (void)
+{
+  PSO_current_state = PSO_NEXT_GENERATION;
+}
+
+/*************************************************
+* Function: PSO_next_generation
+*
+* Parameters: none (reads/writes globals psos[], cur_pso, PSO_current_state)
+*
+* Returns: void
+*
+* Description: Advances the generation counter of the current swarm. If
+*   more generations remain, loops back to PSO_UPDATE_INERTIA_WEIGHT for
+*   another generation. Otherwise switches to the next swarm (wrapping
+*   from the last back to swarm 0), resets its particle index, and enters
+*   the PSO_UPDATE_PBEST_EACH_CYCLE state.
+*
+**************************************************/
+static void PSO_next_generation (void)
+{
+//printf("best fitness = %f\tbest index = %d\n",psos[cur_pso].pbest_values[psos[cur_pso].global_best_index ], psos[cur_pso].global_best_index);
+  if ((++(psos[cur_pso].gene_index)) < (psos[cur_pso].env_data.max_generation ) )
+  { // next generation of the same population of PSO
+    PSO_current_state = PSO_UPDATE_INERTIA_WEIGHT;
+  }
+  else
+  {
+    if ( (++cur_pso ) >= NUM_PSO)
+    { // end of the cycle
+      cur_pso = 0; // move to the first pso
+    }
+    PSO_current_state = PSO_UPDATE_PBEST_EACH_CYCLE; // move to the next state
+    psos[cur_pso].popu_index = 0;
+  }
+}
+
+/*************************************************
+* Function: PSO_update_pbest_each_cycle
+*
+* Parameters: none (reads/writes globals psos[], cur_pso,
+*   pso_update_pbest_each_cycle_pending, PSO_current_state)
+*
+* Returns: void
+*
+* Description: When the compile-time/config flag
+*   PSO_UPDATE_PBEST_EACH_CYCLE_FLAG is enabled, re-evaluates every
+*   particle of the current swarm once per cycle (one particle per state
+*   invocation) and overwrites its pbest value with the fresh evaluation.
+*   The `pending` flag tells evaluate_functions() to read positions from
+*   pbest_position_values instead of position_values. When the flag is
+*   off, or when all particles have been re-evaluated, transitions to
+*   PSO_NEXT_PSO.
+*
+**************************************************/
+static void PSO_update_pbest_each_cycle (void)
+{
+  if (PSO_UPDATE_PBEST_EACH_CYCLE_FLAG)
+  {
+    pso_update_pbest_each_cycle_pending = TRUE;
+    if ((psos[cur_pso].popu_index) < (psos[cur_pso].popu_size))
+    {
+      evaluate_functions(psos[cur_pso].env_data.function_type);
+      psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value; // update pbest
+      psos[cur_pso].popu_index++;
+    }
+    else // done with evaluation, move to the next state
+    {
+      PSO_current_state = PSO_NEXT_PSO;
+      pso_update_pbest_each_cycle_pending = FALSE;
+    }
+  }
+  else
+  {
+    PSO_current_state = PSO_NEXT_PSO;
+  }
+}
+
+/*************************************************
+* Function: PSO_next_pso
+*
+* Parameters: none (reads/writes globals psos[], cur_pso, PSO_current_state)
+*
+* Returns: void
+*
+* Description: Decides whether another swarm remains to be run in this
+*   cycle. cur_pso was already advanced (and possibly wrapped to 0) by
+*   PSO_next_generation: a nonzero cur_pso means more swarms to process
+*   (go back to PSO_EVALUATE); cur_pso == 0 means the cycle is complete
+*   (go to PSOS_DONE). Resets the particle and generation counters of the
+*   swarm that will run next.
+*
+**************************************************/
+static void PSO_next_pso (void)
+{
+  // next PSO
+  if ( cur_pso > 0)
+  {
+    PSO_current_state = PSO_EVALUATE; // move to the EVALUATE state for the next pso in the same cycle
+  }
+  else
+  { // all the PSOs have been processed
+    PSO_current_state = PSOS_DONE; // end of the cycle
+  }
+  psos[cur_pso].popu_index = 0; // start with the first particle
+  psos[cur_pso].gene_index = 0; // start with the first generation
+}
+
+/*************************************************
+* Function: PSOs_done
+*
+* Parameters: none (writes global PSO_current_state)
+*
+* Returns: void
+*
+* Description: Terminal state of one full cycle. Re-arms the state
+*   machine at PSO_EVALUATE so that a subsequent cycle (driven by the
+*   caller's outer loop) starts from evaluation.
+*
+**************************************************/
+static void PSOs_done (void)
+{
+  PSO_current_state = PSO_EVALUATE; // if start another cycle, start from PSO_EVALUATE
+}
+
+/*************************************************/
+/* PSO Evaluation Functions */
+/*************************************************/
+
+// Listing 6.16 The evaluate_functions() routine
+/*************************************************
+* Function: evaluate_functions
+*
+* Parameters: fun_type - one of the evaluation-function enum constants
+*   (SIMPLE_MIN ... BP_MAX) selecting the objective to evaluate
+*
+* Returns: void (result is stored in psos[cur_pso].eva_fun_value by the
+*   routine dispatched to)
+*
+* Description: Dispatch table from the configured function type to the
+*   concrete objective-function routine for the current particle. The
+*   BP_MAX case copies the particle's position into a backprop network's
+*   weights and uses the network's training result as fitness. Unknown
+*   types are silently ignored (eva_fun_value is left unchanged).
+*
+**************************************************/
+static void evaluate_functions (int fun_type)
+{
+  switch (fun_type)
+  {
+    case SIMPLE_MIN:
+      simple_min();
+      break;
+    case SIMPLE_MAX:
+      simple_max();
+      break;
+    case MICHALEWICZ_MIN:
+      michalewicz_min();
+      break;
+    case MICHALEWICZ_MAX:
+      michalewicz_max();
+      break;
+    case MICHALEWICZ_2_MIN:
+      michalewicz_2_min();
+      break;
+    case MICHALEWICZ_2_MAX:
+      michalewicz_2_max();
+      break;
+    case G1_MIN:
+      g1_min();
+      break;
+    case G1_MAX:
+      g1_max();
+      break;
+    case G7_MIN:
+      g7_min();
+      break;
+    case G7_MAX:
+      g7_max();
+      break;
+    case G9_MIN:
+      g9_min();
+      break;
+    case G9_MAX:
+      g9_max();
+      break;
+    case F6:
+      f6();
+      break;
+    case SPHERE:
+      sphere();
+      break;
+    case ROSENBROCK:
+      rosenbrock();
+      break;
+    case RASTRIGRIN:
+      rastrigrin();
+      break;
+    case GRIEWANK:
+      griewank();
+      break;
+    case BP_MAX:
+      BP_Weights_From_PSO(psos[cur_pso].position_values[psos[cur_pso].popu_index]);
+      psos[cur_pso].eva_fun_value = BP_Main_Loop();
+      //printf("psos[cur_pso].eva_fun_value = %f\n",psos[cur_pso].eva_fun_value);
+      break;
+    default:
+      break;
+  }
+}
+
+/*************************************************
+* Function: best_solution_values
+*
+* Parameters: fun_type - evaluation-function enum constant
+*
+* Returns: void (results are written into the global paper_results[]
+*   array by the routine dispatched to)
+*
+* Description: For the constrained benchmarks (G1/G7/G9 minimization)
+*   computes the constraint values and objective value at the best
+*   solution found, for reporting. All other function types are no-ops.
+*
+**************************************************/
+static void best_solution_values (int fun_type)
+{
+  switch (fun_type)
+  {
+    case G1_MIN:
+      g1_best_solution();
+      break;
+    case G7_MIN:
+      g7_best_solution();
+      break;
+    case G9_MIN:
+      g9_best_solution();
+      break;
+    default:
+      break;
+  }
+}
+
+/*************************************************
+* Function: simple_min
+*
+* Parameters: none (reads globals psos[], cur_pso,
+*   pso_update_pbest_each_cycle_pending; writes
+*   psos[cur_pso].eva_fun_value)
+*
+* Returns: void
+*
+* Description: Co-evolutionary evaluation of f(x,y) = x*y for the
+*   "minimizing" swarm: the fitness of the current particle x is
+*   max over all particles y of the opposing swarm of x*y (the outer
+*   minimization happens via the swarm's own pbest comparison). Despite
+*   the _min name, the inner operation is a max -- this is the min-max
+*   formulation, not a bug. When the pbest-refresh cycle is pending,
+*   pbest positions are used instead of current positions.
+*   NOTE(review): fit_value is left uninitialized if the opposing swarm's
+*   popu_size is 0 -- presumably sizes are always >= 1; confirm at setup.
+*
+**************************************************/
+static void simple_min (void)
+{
+  int idx_i;
+  float x, y,fit_value;
+  int env_pso;
+  double temp_max;
+
+  env_pso = (cur_pso)?0:1; // the opposing swarm (assumes exactly 2 swarms)
+  if (pso_update_pbest_each_cycle_pending)
+  {
+    x = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][0];
+  }
+  else
+  {
+    x = psos[cur_pso].position_values[psos[cur_pso].popu_index][0];
+  }
+  for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+  {
+    if (pso_update_pbest_each_cycle_pending)
+    {
+      y = psos[env_pso].pbest_position_values[idx_i][0];
+    }
+    else
+    {
+      y = psos[env_pso].position_values[idx_i][0];
+    }
+    if (idx_i == 0)
+    {
+      temp_max = x * y;
+      fit_value = temp_max;
+    }
+    else
+    {
+      fit_value = max((x * y),temp_max);
+      temp_max = fit_value;
+    }
+  }
+
+  psos[cur_pso].eva_fun_value = fit_value;
+}
+
+/*************************************************
+* Function: simple_max
+*
+* Parameters: none (reads globals psos[], cur_pso,
+*   pso_update_pbest_each_cycle_pending; writes
+*   psos[cur_pso].eva_fun_value)
+*
+* Returns: void
+*
+* Description: Mirror image of simple_min for the "maximizing" swarm of
+*   f(x,y) = x*y: the fitness of the current particle y is min over all
+*   particles x of the opposing swarm of x*y (the outer maximization
+*   happens via the swarm's own pbest comparison). When the pbest-refresh
+*   cycle is pending, pbest positions are used instead of current
+*   positions.
+*
+**************************************************/
+static void simple_max (void)
+{
+  int idx_i;
+  float x, y,fit_value;
+  int env_pso;
+  double temp_min;
+
+  env_pso = (cur_pso)?0:1; // the opposing swarm (assumes exactly 2 swarms)
+  if (pso_update_pbest_each_cycle_pending)
+  {
+    y = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][0];
+  }
+  else
+  {
+    y = psos[cur_pso].position_values[psos[cur_pso].popu_index][0];
+  }
+  for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+  {
+    if (pso_update_pbest_each_cycle_pending)
+    {
+      x = psos[env_pso].pbest_position_values[idx_i][0];
+    }
+    else
+    {
+      x = psos[env_pso].position_values[idx_i][0];
+    }
+    if (idx_i == 0)
+    {
+      temp_min = x * y;
+      fit_value = temp_min;
+    }
+    else
+    {
+      fit_value = min((x * y),temp_min);
+      temp_min = fit_value;
+    }
+  }
+
+  psos[cur_pso].eva_fun_value = fit_value;
+
+}
+
+/*************************************************
+* Function: michalewicz_min
+*
+* Parameters: none (reads globals psos[], cur_pso,
+*   pso_update_pbest_each_cycle_pending; writes
+*   psos[cur_pso].eva_fun_value)
+*
+* Returns: void
+*
+* Description: Lagrangian min-max evaluation for the x-swarm of a
+*   constrained Michalewicz benchmark: for the current x = (x1,x2) the
+*   fitness is the max over all multipliers y = (y1,y2) of the opposing
+*   swarm of
+*     L = 100*(x2-x1^2)^2 + (1-x1)^2 - y1*(x1+x2^2) - y2*(x1^2+x2).
+*   The swarm's own pbest comparison then minimizes this worst case.
+*   Pending pbest-refresh cycles read from pbest positions.
+*
+**************************************************/
+static void michalewicz_min (void)
+{
+  int idx_i;
+  float x1,x2, y1,y2,fit_value;
+  int env_pso;
+  double temp_max,temp_value;
+
+  env_pso = (cur_pso)?0:1; // the opposing (multiplier) swarm
+  if (pso_update_pbest_each_cycle_pending)
+  {
+    x1 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][0];
+    x2 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][1];
+  }
+  else
+  {
+    x1 = psos[cur_pso].position_values[psos[cur_pso].popu_index][0];
+    x2 = psos[cur_pso].position_values[psos[cur_pso].popu_index][1];
+  }
+
+  for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+  {
+    if (pso_update_pbest_each_cycle_pending)
+    {
+      y1 = psos[env_pso].pbest_position_values[idx_i][0];
+      y2 = psos[env_pso].pbest_position_values[idx_i][1];
+    }
+    else
+    {
+      y1 = psos[env_pso].position_values[idx_i][0];
+      y2 = psos[env_pso].position_values[idx_i][1];
+    }
+    temp_value = 100 * pow((x2- x1*x1),2) + pow((1.0-x1),2) - y1*(x1+x2*x2) - y2*(x1*x1+x2);
+    if (idx_i == 0)
+    {
+      temp_max = temp_value;
+      fit_value = (float)temp_max;
+    }
+    else
+    {
+      fit_value = max(temp_value,temp_max);
+      temp_max = fit_value;
+    }
+
+  }
+
+  psos[cur_pso].eva_fun_value = fit_value;
+}
+
+/*************************************************
+* Function: michalewicz_max
+*
+* Parameters: none (reads globals psos[], cur_pso,
+*   pso_update_pbest_each_cycle_pending; writes
+*   psos[cur_pso].eva_fun_value)
+*
+* Returns: void
+*
+* Description: Dual side of michalewicz_min: for the current multipliers
+*   y = (y1,y2) the fitness is the min over all x = (x1,x2) of the
+*   opposing swarm of the same Lagrangian
+*     L = 100*(x2-x1^2)^2 + (1-x1)^2 - y1*(x1+x2^2) - y2*(x1^2+x2).
+*   The swarm's own pbest comparison then maximizes this best case.
+*   Pending pbest-refresh cycles read from pbest positions.
+*
+**************************************************/
+static void michalewicz_max (void)
+{
+  int idx_i;
+  float x1,x2, y1,y2,fit_value;
+  int env_pso;
+  double temp_min,temp_value;
+
+  env_pso = (cur_pso)?0:1; // the opposing (primal) swarm
+
+  if (pso_update_pbest_each_cycle_pending)
+  {
+    y1 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][0];
+    y2 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][1];
+  }
+  else
+  {
+    y1 = psos[cur_pso].position_values[psos[cur_pso].popu_index][0];
+    y2 = psos[cur_pso].position_values[psos[cur_pso].popu_index][1];
+  }
+
+  for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+  {
+    if (pso_update_pbest_each_cycle_pending)
+    {
+      x1 = psos[env_pso].pbest_position_values[idx_i][0];
+      x2 = psos[env_pso].pbest_position_values[idx_i][1];
+    }
+    else
+    {
+      x1 = psos[env_pso].position_values[idx_i][0];
+      x2 = psos[env_pso].position_values[idx_i][1];
+    }
+    temp_value = 100 * pow((x2- x1*x1),2) + pow((1.0-x1),2) - y1*(x1+x2*x2) - y2*(x1*x1+x2);
+    if (idx_i == 0)
+    {
+      temp_min = temp_value;
+      fit_value = temp_min;
+    }
+    else
+    {
+      fit_value = min(temp_value,temp_min);
+      temp_min = fit_value;
+    }
+  }
+
+  psos[cur_pso].eva_fun_value = fit_value;
+
+}
+
+/*************************************************
+* Function: michalewicz_2_min
+*
+* Parameters: none (reads globals psos[], cur_pso,
+*   pso_update_pbest_each_cycle_pending; writes
+*   psos[cur_pso].eva_fun_value)
+*
+* Returns: void
+*
+* Description: Min-max evaluation (x-swarm side) of a second Michalewicz
+*   constrained benchmark with Lagrangian
+*     L = (x1-2)^2 + (x2-1)^2 + y1*(x1^2-x2) + y2*(x1+x2-2).
+*   Fitness of the current x is the max of L over the opposing
+*   multiplier swarm; the swarm's pbest comparison minimizes it.
+*
+**************************************************/
+static void michalewicz_2_min (void)
+{
+  int idx_i;
+  float x1,x2, y1,y2,fit_value;
+  int env_pso;
+  double temp_max,temp_value;
+
+  env_pso = (cur_pso)?0:1; // the opposing (multiplier) swarm
+
+  if (pso_update_pbest_each_cycle_pending)
+  {
+    x1 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][0];
+    x2 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][1];
+  }
+  else
+  {
+    x1 = psos[cur_pso].position_values[psos[cur_pso].popu_index][0];
+    x2 = psos[cur_pso].position_values[psos[cur_pso].popu_index][1];
+  }
+
+  for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+  {
+    if (pso_update_pbest_each_cycle_pending)
+    {
+      y1 = psos[env_pso].pbest_position_values[idx_i][0];
+      y2 = psos[env_pso].pbest_position_values[idx_i][1];
+    }
+    else
+    {
+      y1 = psos[env_pso].position_values[idx_i][0];
+      y2 = psos[env_pso].position_values[idx_i][1];
+    }
+    temp_value = pow((x1-2),2) + pow((x2-1),2) +y1*(x1*x1-x2)+y2*(x1+x2-2);
+
+    if (idx_i == 0)
+    {
+      temp_max = temp_value;
+      fit_value = temp_max;
+    }
+    else
+    {
+      fit_value = max(temp_value,temp_max);
+      temp_max = fit_value;
+    }
+
+  }
+
+  psos[cur_pso].eva_fun_value = fit_value;
+}
+
+/*************************************************
+* Function: michalewicz_2_max
+*
+* Parameters: none (reads globals psos[], cur_pso,
+*   pso_update_pbest_each_cycle_pending; writes
+*   psos[cur_pso].eva_fun_value)
+*
+* Returns: void
+*
+* Description: Dual side of michalewicz_2_min: fitness of the current
+*   multipliers y is the min of the same Lagrangian
+*     L = (x1-2)^2 + (x2-1)^2 + y1*(x1^2-x2) + y2*(x1+x2-2)
+*   over the opposing x-swarm; the swarm's pbest comparison maximizes it.
+*
+**************************************************/
+static void michalewicz_2_max (void)
+{
+  int idx_i;
+  float x1,x2, y1,y2,fit_value;
+  int env_pso;
+  double temp_min,temp_value;
+
+  env_pso = (cur_pso)?0:1; // the opposing (primal) swarm
+
+  if (pso_update_pbest_each_cycle_pending)
+  {
+    y1 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][0];
+    y2 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][1];
+  }
+  else
+  {
+    y1 = psos[cur_pso].position_values[psos[cur_pso].popu_index][0];
+    y2 = psos[cur_pso].position_values[psos[cur_pso].popu_index][1];
+  }
+
+  for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+  {
+    if (pso_update_pbest_each_cycle_pending)
+    {
+      x1 = psos[env_pso].pbest_position_values[idx_i][0];
+      x2 = psos[env_pso].pbest_position_values[idx_i][1];
+    }
+    else
+    {
+      x1 = psos[env_pso].position_values[idx_i][0];
+      x2 = psos[env_pso].position_values[idx_i][1];
+    }
+
+    temp_value = pow((x1-2),2) + pow((x2-1),2) +y1*(x1*x1-x2)+y2*(x1+x2-2);
+
+    if (idx_i == 0)
+    {
+      temp_min = temp_value;
+      fit_value = temp_min;
+    }
+    else
+    {
+      fit_value = min(temp_value,temp_min);
+      temp_min = fit_value;
+    }
+  }
+
+  psos[cur_pso].eva_fun_value = fit_value;
+
+}
+
+/*************************************************
+* Function: g1_min
+*
+* Parameters: none (reads globals psos[], cur_pso,
+*   pso_update_pbest_each_cycle_pending; writes
+*   psos[cur_pso].eva_fun_value)
+*
+* Returns: void
+*
+* Description: Min-max (augmented-Lagrangian style) evaluation of the
+*   13-variable G1 constrained benchmark for the x-swarm. The 9
+*   constraint residuals r[] depend only on x, so they are computed once
+*   before the multiplier loop. For each multiplier vector y of the
+*   opposing swarm the penalized objective is the G1 objective plus a
+*   per-constraint term p[j]: quadratic-penalty form when the residual is
+*   above -y/200, otherwise a -y^2/400 correction. Fitness is the max
+*   over all y; the swarm's pbest comparison minimizes it.
+*
+**************************************************/
+static void g1_min (void)
+{
+  int idx_i,idx_j;
+  double x[13]; // decision variables of the current particle
+  double y[9];  // Lagrange multipliers from the opposing swarm
+  double r[9];  // constraint residuals at x
+  double p[9];  // per-constraint penalty terms
+
+  double fit_value;
+  int env_pso;
+  double temp_max,temp_value;
+
+  env_pso = (cur_pso)?0:1; // the opposing (multiplier) swarm
+
+  if (pso_update_pbest_each_cycle_pending)
+  {
+    for (idx_i = 0; idx_i <13 ; idx_i++ )
+    {
+      x[idx_i] = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][idx_i];
+    }
+  }
+  else
+  {
+    for (idx_i = 0; idx_i <13 ; idx_i++ )
+    {
+      x[idx_i] = psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
+    }
+  }
+
+  // constrains
+  r[0] = 2 * x[0] + 2 * x[1] + x[9] + x[10] - 10;
+  r[1] = 2 * x[0] + 2 * x[2] + x[9] + x[11] - 10;
+  r[2] = 2 * x[1] + 2 * x[2] + x[10] + x[11] - 10;
+  r[3] = -8 * x[0] + x[9];
+  r[4] = -8 * x[1] + x[10];
+  r[5] = -8 * x[2] + x[11];
+  r[6] = -2 * x[3] - x[4] + x[9];
+  r[7] = -2 * x[5] - x[6] + x[10];
+  r[8] = -2 * x[7] - x[8] + x[11];
+
+  for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+  {
+    if (pso_update_pbest_each_cycle_pending)
+    {
+      for (idx_j = 0; idx_j <9 ; idx_j++ )
+      {
+        y[idx_j] = psos[env_pso].pbest_position_values[idx_i][idx_j];
+      }
+    }
+    else
+    {
+      for (idx_j = 0; idx_j <9 ; idx_j++ )
+      {
+        y[idx_j] = psos[env_pso].position_values[idx_i][idx_j];
+      }
+    }
+
+    // G1 objective: 5*sum(x_i - x_i^2), i<4, minus sum of remaining x
+    temp_value = 0.0;
+    for (idx_j = 0; idx_j <4 ; idx_j++ )
+    {
+      temp_value += 5 * (x[idx_j] - x[idx_j] * x[idx_j]);
+    }
+
+    for (idx_j = 4; idx_j <13 ; idx_j++ )
+    {
+      temp_value -= x[idx_j];
+    }
+
+    for (idx_j = 0; idx_j <9 ; idx_j++ )
+    {
+      if ((r[idx_j]) >= (-y[idx_j]/200.0))
+      {
+        p[idx_j] = y[idx_j] * r[idx_j] + 100 * r[idx_j] * r[idx_j];
+      }
+      else
+      {
+        p[idx_j] = - y[idx_j] * y[idx_j]/400.0;
+      }
+    }
+
+
+    for (idx_j = 0; idx_j <9 ; idx_j++ )
+    {
+      temp_value += p[idx_j];
+    }
+
+    if (idx_i == 0)
+    {
+      temp_max = temp_value;
+      fit_value = temp_max;
+    }
+    else
+    {
+      fit_value = max(temp_value,temp_max);
+      temp_max = fit_value;
+    }
+
+  }
+
+  psos[cur_pso].eva_fun_value = fit_value;
+}
+
+/*************************************************
+* Function: g1_best_solution
+*
+* Parameters: none (reads global psos[0]; writes globals paper_results[],
+*   counter)
+*
+* Returns: void
+*
+* Description: Reporting helper for the G1 benchmark. Reads the best
+*   13-variable solution found by swarm 0, stores its 9 constraint
+*   residuals in paper_results[0..8] and the (unpenalized) objective
+*   value in paper_results[9], and sets counter = 10 so the caller knows
+*   how many result entries are valid.
+*
+**************************************************/
+static void g1_best_solution (void)
+{
+  int idx_i,idx_j;
+  double x[13];
+
+  double temp_value;
+
+  for (idx_i = 0; idx_i <13 ; idx_i++ )
+  {
+    x[idx_i] = psos[0].pbest_position_values[psos[0].global_best_index][idx_i];
+  }
+  counter = 10; // 9 constraint values + 1 objective value
+  // constrains
+  paper_results[0] = 2 * x[0] + 2 * x[1] + x[9] + x[10] - 10;
+  paper_results[1] = 2 * x[0] + 2 * x[2] + x[9] + x[11] - 10;
+  paper_results[2] = 2 * x[1] + 2 * x[2] + x[10] + x[11] - 10;
+  paper_results[3] = -8 * x[0] + x[9];
+  paper_results[4] = -8 * x[1] + x[10];
+  paper_results[5] = -8 * x[2] + x[11];
+  paper_results[6] = -2 * x[3] - x[4] + x[9];
+  paper_results[7] = -2 * x[5] - x[6] + x[10];
+  paper_results[8] = -2 * x[7] - x[8] + x[11];
+
+  temp_value = 0.0;
+  for (idx_j = 0; idx_j <4 ; idx_j++ )
+  {
+    temp_value += 5 * (x[idx_j] - x[idx_j] * x[idx_j]);
+  }
+
+  for (idx_j = 4; idx_j <13 ; idx_j++ )
+  {
+    temp_value -= x[idx_j];
+  }
+  paper_results[9] = temp_value;
+}
+
+/*************************************************
+* Function: g1_max
+*
+* Parameters: none (reads globals psos[], cur_pso,
+*   pso_update_pbest_each_cycle_pending; writes
+*   psos[cur_pso].eva_fun_value)
+*
+* Returns: void
+*
+* Description: Dual side of g1_min: the current particle holds the 9
+*   multipliers y, and for each 13-variable x of the opposing swarm the
+*   same penalized G1 Lagrangian is evaluated (residuals r[] now depend
+*   on the loop's x, so they are recomputed inside the loop). Fitness is
+*   the min over all x; the swarm's pbest comparison maximizes it.
+*
+**************************************************/
+static void g1_max (void)
+{
+  int idx_i,idx_j;
+  double x[13]; // decision variables from the opposing swarm
+  double y[9];  // Lagrange multipliers of the current particle
+  double r[9];  // constraint residuals at x
+  double p[9];  // per-constraint penalty terms
+
+  double fit_value;
+  int env_pso;
+  double temp_min,temp_value;
+
+  env_pso = (cur_pso)?0:1; // the opposing (primal) swarm
+
+  if (pso_update_pbest_each_cycle_pending)
+  {
+    for (idx_i = 0; idx_i <9 ; idx_i++ )
+    {
+      y[idx_i] = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][idx_i];
+    }
+  }
+  else
+  {
+    for (idx_i = 0; idx_i <9 ; idx_i++ )
+    {
+      y[idx_i] = psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
+    }
+  }
+
+  for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+  {
+    if (pso_update_pbest_each_cycle_pending)
+    {
+      for (idx_j = 0; idx_j <13 ; idx_j++ )
+      {
+        x[idx_j] = psos[env_pso].pbest_position_values[idx_i][idx_j];
+      }
+    }
+    else
+    {
+      for (idx_j = 0; idx_j <13 ; idx_j++ )
+      {
+        x[idx_j] = psos[env_pso].position_values[idx_i][idx_j];
+      }
+    }
+
+    // constrains
+    r[0] = 2 * x[0] + 2 * x[1] + x[9] + x[10] - 10;
+    r[1] = 2 * x[0] + 2 * x[2] + x[9] + x[11] - 10;
+    r[2] = 2 * x[1] + 2 * x[2] + x[10] + x[11] - 10;
+    r[3] = -8 * x[0] + x[9];
+    r[4] = -8 * x[1] + x[10];
+    r[5] = -8 * x[2] + x[11];
+    r[6] = -2 * x[3] - x[4] + x[9];
+    r[7] = -2 * x[5] - x[6] + x[10];
+    r[8] = -2 * x[7] - x[8] + x[11];
+
+    temp_value = 0.0;
+    for (idx_j = 0; idx_j <4 ; idx_j++ )
+    {
+      temp_value += 5 * (x[idx_j] - x[idx_j] * x[idx_j]);
+    }
+
+    for (idx_j = 4; idx_j <13 ; idx_j++ )
+    {
+      temp_value -= x[idx_j];
+    }
+
+    for (idx_j = 0; idx_j <9 ; idx_j++ )
+    {
+      if ((r[idx_j]) >= (-y[idx_j]/200.0))
+      {
+        p[idx_j] = y[idx_j] * r[idx_j] + 100 * r[idx_j] * r[idx_j];
+      }
+      else
+      {
+        p[idx_j] = - y[idx_j] * y[idx_j]/400.0;
+      }
+    }
+
+
+    for (idx_j = 0; idx_j <9 ; idx_j++ )
+    {
+      temp_value += p[idx_j];
+    }
+
+    if (idx_i == 0)
+    {
+      temp_min = temp_value;
+      fit_value = temp_min;
+    }
+    else
+    {
+      fit_value = min(temp_value,temp_min);
+      temp_min = fit_value;
+    }
+
+  }
+
+  psos[cur_pso].eva_fun_value = fit_value;
+}
+
+/*************************************************
+* Function: g7_min
+*
+* Parameters: none (reads globals psos[], cur_pso,
+*   pso_update_pbest_each_cycle_pending; writes
+*   psos[cur_pso].eva_fun_value)
+*
+* Returns: void
+*
+* Description: Min-max evaluation of the 10-variable G7 constrained
+*   benchmark for the x-swarm. The 8 constraint residuals r1..r8 depend
+*   only on x, so they are computed once before the multiplier loop. For
+*   each 8-multiplier vector y1..y8 of the opposing swarm the penalized
+*   objective is the G7 objective plus one p-term per constraint (same
+*   conditional penalty scheme as g1_min: quadratic penalty when the
+*   residual exceeds -y/200, else -y^2/400). Fitness is the max over all
+*   y; the swarm's pbest comparison minimizes it.
+*
+**************************************************/
+static void g7_min (void)
+{
+  int idx_i;
+  double x1, x2, x3, x4, x5, x6, x7, x8, x9, x10; // decision variables
+  double y1, y2, y3, y4, y5, y6, y7, y8;          // multipliers
+  double r1, r2, r3, r4, r5, r6, r7, r8;          // constraint residuals
+  double p1, p2, p3, p4, p5, p6, p7, p8;          // penalty terms
+
+  double fit_value;
+  int env_pso;
+  double temp_max,temp_value;
+
+  env_pso = (cur_pso)?0:1; // the opposing (multiplier) swarm
+
+  if (pso_update_pbest_each_cycle_pending)
+  {
+    x1 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][0];
+    x2 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][1];
+    x3 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][2];
+    x4 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][3];
+    x5 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][4];
+    x6 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][5];
+    x7 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][6];
+    x8 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][7];
+    x9 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][8];
+    x10 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][9];
+  }
+  else
+  {
+    x1 = psos[cur_pso].position_values[psos[cur_pso].popu_index][0];
+    x2 = psos[cur_pso].position_values[psos[cur_pso].popu_index][1];
+    x3 = psos[cur_pso].position_values[psos[cur_pso].popu_index][2];
+    x4 = psos[cur_pso].position_values[psos[cur_pso].popu_index][3];
+    x5 = psos[cur_pso].position_values[psos[cur_pso].popu_index][4];
+    x6 = psos[cur_pso].position_values[psos[cur_pso].popu_index][5];
+    x7 = psos[cur_pso].position_values[psos[cur_pso].popu_index][6];
+    x8 = psos[cur_pso].position_values[psos[cur_pso].popu_index][7];
+    x9 = psos[cur_pso].position_values[psos[cur_pso].popu_index][8];
+    x10 = psos[cur_pso].position_values[psos[cur_pso].popu_index][9];
+  }
+
+  // restrictions
+  r1= -(105- 4*x1- 5*x2+ 3*x7- 9*x8);
+  r2= -(-3*pow(x1-2, 2)- 4*pow(x2-3, 2)- 2*x3*x3+ 7*x4+ 120);
+  r3= -(-10*x1+ 8*x2+ 17*x7- 2*x8);
+  r4= -(-x1*x1- 2*pow(x2-2, 2)+ 2*x1*x2- 14*x5+ 6*x6);
+  r5= -(8*x1- 2*x2- 5*x9+ 2*x10+12);
+  r6= -(-5*x1*x1- 8*x2- pow(x3-6, 2)+ 2*x4+ 40);
+  r7= -(3*x1 -6*x2- 12*pow(x9-8, 2)+ 7*x10);
+  r8= -(-0.5*pow(x1-8, 2)- 2*(x2-4)- 3*x5*x5+ x6+ 30);
+
+  for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+  {
+    if (pso_update_pbest_each_cycle_pending)
+    {
+      y1 = psos[env_pso].pbest_position_values[idx_i][0];
+      y2 = psos[env_pso].pbest_position_values[idx_i][1];
+      y3 = psos[env_pso].pbest_position_values[idx_i][2];
+      y4 = psos[env_pso].pbest_position_values[idx_i][3];
+      y5 = psos[env_pso].pbest_position_values[idx_i][4];
+      y6 = psos[env_pso].pbest_position_values[idx_i][5];
+      y7 = psos[env_pso].pbest_position_values[idx_i][6];
+      y8 = psos[env_pso].pbest_position_values[idx_i][7];
+    }
+    else
+    {
+      y1 = psos[env_pso].position_values[idx_i][0];
+      y2 = psos[env_pso].position_values[idx_i][1];
+      y3 = psos[env_pso].position_values[idx_i][2];
+      y4 = psos[env_pso].position_values[idx_i][3];
+      y5 = psos[env_pso].position_values[idx_i][4];
+      y6 = psos[env_pso].position_values[idx_i][5];
+      y7 = psos[env_pso].position_values[idx_i][6];
+      y8 = psos[env_pso].position_values[idx_i][7];
+    }
+
+    // G7 objective value at x
+    temp_value = x1*x1+ x2*x2+ x1*x2- 14*x1 -16*x2+ pow(x3-10, 2)
+      +4*pow(x4-5,2)+ pow(x5-3, 2)+ 2*pow(x6-1, 2)+ 5*x7*x7
+      +7*pow(x8-11, 2)+ 2*pow(x9-10, 2)+ pow(x10-7, 2)+ 45;
+
+    if ((r1) >= (-y1/200.0))
+    {
+      p1 = y1 * r1 + 100 * r1 * r1;
+    }
+    else
+    {
+      p1 = - y1*y1/400.0;
+    }
+
+    if ((r2) >= (-y2/200.0))
+    {
+      p2 = y2 * r2 + 100 * r2 * r2;
+    }
+    else
+    {
+      p2 = - y2*y2/400.0;
+    }
+
+    if ((r3) >= (-y3/200.0))
+    {
+      p3 = y3 * r3 + 100 * r3 * r3;
+    }
+    else
+    {
+      p3 = - y3*y3/400.0;
+    }
+
+    if ((r4) >= (-y4/200.0))
+    {
+      p4 = y4 * r4 + 100 * r4 * r4;
+    }
+    else
+    {
+      p4 = - y4*y4/400.0;
+    }
+
+    if ((r5) >= (-y5/200.0))
+    {
+      p5 = y5 * r5 + 100 * r5 * r5;
+    }
+    else
+    {
+      p5 = - y5*y5/400.0;
+    }
+
+    if ((r6) >= (-y6/200.0))
+    {
+      p6 = y6 * r6 + 100 * r6 * r6;
+    }
+    else
+    {
+      p6 = - y6*y6/400.0;
+    }
+
+    if ((r7) >= (-y7/200.0))
+    {
+      p7 = y7 * r7 + 100 * r7 * r7;
+    }
+    else
+    {
+      p7 = - y7*y7/400.0;
+    }
+
+    if ((r8) >= (-y8/200.0))
+    {
+      p8 = y8 * r8 + 100 * r8 * r8;
+    }
+    else
+    {
+      p8 = - y8*y8/400.0;
+    }
+
+    temp_value += p1 + p2 + p3 + p4 + p5 + p6 + p7 + p8;
+    //temp_value += (y1*r1 +y2*r2 +y3*r3 +y4*r4 +y5*r5 +y6*r6 +y7*r7+y8*r8);
+
+    if (idx_i == 0)
+    {
+      temp_max = temp_value;
+      fit_value = temp_max;
+    }
+    else
+    {
+      fit_value = max(temp_value,temp_max);
+      temp_max = fit_value;
+    }
+
+  }
+
+  psos[cur_pso].eva_fun_value = fit_value;
+
+}
+
+/*************************************************
+* Function: g7_best_solution
+*
+* Parameters: none (reads global psos[0]; writes globals paper_results[],
+*   counter)
+*
+* Returns: void
+*
+* Description: Reporting helper for the G7 benchmark. Reads the best
+*   10-variable solution found by swarm 0, stores its 8 constraint
+*   residuals in paper_results[0..7] and the (unpenalized) objective
+*   value in paper_results[8], and sets counter = 9 so the caller knows
+*   how many result entries are valid.
+*
+**************************************************/
+static void g7_best_solution (void)
+{
+  double x1, x2, x3, x4, x5, x6, x7, x8, x9, x10;
+
+  x1 = psos[0].pbest_position_values[psos[0].global_best_index][0];
+  x2 = psos[0].pbest_position_values[psos[0].global_best_index][1];
+  x3 = psos[0].pbest_position_values[psos[0].global_best_index][2];
+  x4 = psos[0].pbest_position_values[psos[0].global_best_index][3];
+  x5 = psos[0].pbest_position_values[psos[0].global_best_index][4];
+  x6 = psos[0].pbest_position_values[psos[0].global_best_index][5];
+  x7 = psos[0].pbest_position_values[psos[0].global_best_index][6];
+  x8 = psos[0].pbest_position_values[psos[0].global_best_index][7];
+  x9 = psos[0].pbest_position_values[psos[0].global_best_index][8];
+  x10 = psos[0].pbest_position_values[psos[0].global_best_index][9];
+
+  // restrictions
+  paper_results[0] = -(105- 4*x1- 5*x2+ 3*x7- 9*x8);
+  paper_results[1] = -(-3*pow(x1-2, 2)- 4*pow(x2-3, 2)- 2*x3*x3+ 7*x4+ 120);
+  paper_results[2] = -(-10*x1+ 8*x2+ 17*x7- 2*x8);
+  paper_results[3] = -(-x1*x1- 2*pow(x2-2, 2)+ 2*x1*x2- 14*x5+ 6*x6);
+  paper_results[4] = -(8*x1- 2*x2- 5*x9+ 2*x10+12);
+  paper_results[5] = -(-5*x1*x1- 8*x2- pow(x3-6, 2)+ 2*x4+ 40);
+  paper_results[6] = -(3*x1 -6*x2- 12*pow(x9-8, 2)+ 7*x10);
+  paper_results[7] = -(-0.5*pow(x1-8, 2)- 2*(x2-4)- 3*x5*x5+ x6+ 30);
+
+  paper_results[8] = x1*x1+ x2*x2+ x1*x2- 14*x1 -16*x2+ pow(x3-10, 2)
+    +4*pow(x4-5,2)+ pow(x5-3, 2)+ 2*pow(x6-1, 2)+ 5*x7*x7
+    +7*pow(x8-11, 2)+ 2*pow(x9-10, 2)+ pow(x10-7, 2)+ 45;
+
+  counter = 9; // 8 constraint values + 1 objective value
+}
+
+/*************************************************
+* Function: g7_max
+*
+* Parameters: none (reads globals psos[], cur_pso,
+*   pso_update_pbest_each_cycle_pending; writes
+*   psos[cur_pso].eva_fun_value)
+*
+* Returns: void
+*
+* Description: Dual side of g7_min: the current particle holds the 8
+*   multipliers y1..y8, and for each 10-variable x of the opposing swarm
+*   the penalized G7 Lagrangian is evaluated (residuals r1..r8 depend on
+*   the loop's x, so they are recomputed inside the loop). Fitness is the
+*   min over all x; the swarm's pbest comparison maximizes it.
+*
+**************************************************/
+static void g7_max (void)
+{
+  int idx_i;
+  double x1, x2, x3, x4, x5, x6, x7, x8, x9, x10; // decision variables
+  double y1, y2, y3, y4, y5, y6, y7, y8;          // multipliers
+  double r1, r2, r3, r4, r5, r6, r7, r8;          // constraint residuals
+  double p1, p2, p3, p4, p5, p6, p7, p8;          // penalty terms
+  double fit_value;
+  int env_pso;
+  double temp_min,temp_value;
+
+  env_pso = (cur_pso)?0:1; // the opposing (primal) swarm
+
+  if (pso_update_pbest_each_cycle_pending)
+  {
+    y1 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][0];
+    y2 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][1];
+    y3 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][2];
+    y4 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][3];
+    y5 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][4];
+    y6 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][5];
+    y7 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][6];
+    y8 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][7];
+  }
+  else
+  {
+    y1 = psos[cur_pso].position_values[psos[cur_pso].popu_index][0];
+    y2 = psos[cur_pso].position_values[psos[cur_pso].popu_index][1];
+    y3 = psos[cur_pso].position_values[psos[cur_pso].popu_index][2];
+    y4 = psos[cur_pso].position_values[psos[cur_pso].popu_index][3];
+    y5 = psos[cur_pso].position_values[psos[cur_pso].popu_index][4];
+    y6 = psos[cur_pso].position_values[psos[cur_pso].popu_index][5];
+    y7 = psos[cur_pso].position_values[psos[cur_pso].popu_index][6];
+    y8 = psos[cur_pso].position_values[psos[cur_pso].popu_index][7];
+  }
+
+  for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+  {
+    if (pso_update_pbest_each_cycle_pending)
+    {
+      x1 = psos[env_pso].pbest_position_values[idx_i][0];
+      x2 = psos[env_pso].pbest_position_values[idx_i][1];
+      x3 = psos[env_pso].pbest_position_values[idx_i][2];
+      x4 = psos[env_pso].pbest_position_values[idx_i][3];
+      x5 = psos[env_pso].pbest_position_values[idx_i][4];
+      x6 = psos[env_pso].pbest_position_values[idx_i][5];
+      x7 = psos[env_pso].pbest_position_values[idx_i][6];
+      x8 = psos[env_pso].pbest_position_values[idx_i][7];
+      x9 = psos[env_pso].pbest_position_values[idx_i][8];
+      x10 = psos[env_pso].pbest_position_values[idx_i][9];
+    }
+    else
+    {
+      x1 = psos[env_pso].position_values[idx_i][0];
+      x2 = psos[env_pso].position_values[idx_i][1];
+      x3 = psos[env_pso].position_values[idx_i][2];
+      x4 = psos[env_pso].position_values[idx_i][3];
+      x5 = psos[env_pso].position_values[idx_i][4];
+      x6 = psos[env_pso].position_values[idx_i][5];
+      x7 = psos[env_pso].position_values[idx_i][6];
+      x8 = psos[env_pso].position_values[idx_i][7];
+      x9 = psos[env_pso].position_values[idx_i][8];
+      x10 = psos[env_pso].position_values[idx_i][9];
+    }
+
+    r1= -(105- 4*x1- 5*x2+ 3*x7- 9*x8);
+    r2= -(-3*pow(x1-2, 2)- 4*pow(x2-3, 2)- 2*x3*x3+ 7*x4+ 120);
+    r3= -(-10*x1+ 8*x2+ 17*x7- 2*x8);
+    r4= -(-x1*x1- 2*pow(x2-2, 2)+2*x1*x2- 14*x5+ 6*x6);
+    r5= -(8*x1- 2*x2- 5*x9+ 2*x10+12);
+    r6= -(-5*x1*x1- 8*x2- 1*pow(x3-6, 2)+ 2*x4+ 40);
+    r7= -(3*x1 -6*x2- 12*pow(x9-8, 2)+ 7*x10);
+    r8= -(-0.5*pow(x1-8, 2)- 2*(x2-4)- 3*x5*x5+ x6+ 30);
+
+    // G7 objective value at x
+    temp_value = x1*x1+ x2*x2+ x1*x2- 14*x1 -16*x2+ pow(x3-10, 2)
+      +4*pow(x4-5,2)+ pow(x5-3, 2)+ 2*pow(x6-1, 2)+ 5*x7*x7
+      +7*pow(x8-11, 2)+ 2*pow(x9-10, 2)+ pow(x10-7, 2)+ 45;
+
+    if ((r1) >= (-y1/200.0))
+    {
+      p1 = y1 * r1 + 100 * r1 * r1;
+    }
+    else
+    {
+      p1 = - y1*y1/400.0;
+    }
+
+    if ((r2) >= (-y2/200.0))
+    {
+      p2 = y2 * r2 + 100 * r2 * r2;
+    }
+    else
+    {
+      p2 = - y2*y2/400.0;
+    }
+
+    if ((r3) >= (-y3/200.0))
+    {
+      p3 = y3 * r3 + 100 * r3 * r3;
+    }
+    else
+    {
+      p3 = - y3*y3/400.0;
+    }
+
+    if ((r4) >= (-y4/200.0))
+    {
+      p4 = y4 * r4 + 100 * r4 * r4;
+    }
+    else
+    {
+      p4 = - y4*y4/400.0;
+    }
+
+    if ((r5) >= (-y5/200.0))
+    {
+      p5 = y5 * r5 + 100 * r5 * r5;
+    }
+    else
+    {
+      p5 = - y5*y5/400.0;
+    }
+
+    if ((r6) >= (-y6/200.0))
+    {
+      p6 = y6 * r6 + 100 * r6 * r6;
+    }
+    else
+    {
+      p6 = - y6*y6/400.0;
+    }
+
+    if ((r7) >= (-y7/200.0))
+    {
+      p7 = y7 * r7 + 100 * r7 * r7;
+    }
+    else
+    {
+      p7 = - y7*y7/400.0;
+    }
+
+    if ((r8) >= (-y8/200.0))
+    {
+      p8 = y8 * r8 + 100 * r8 * r8;
+    }
+    else
+    {
+      p8 = - y8*y8/400.0;
+    }
+
+    temp_value += p1 + p2 + p3 + p4 + p5 + p6 + p7 + p8;
+    //temp_value += (y1*r1 +y2*r2 +y3*r3 +y4*r4 +y5*r5 +y6*r6 +y7*r7+y8*r8);
+
+    if (idx_i == 0)
+    {
+      temp_min = temp_value;
+      fit_value = temp_min;
+    }
+    else
+    {
+      fit_value = min(temp_value,temp_min);
+      temp_min = fit_value;
+    }
+
+  }
+
+  psos[cur_pso].eva_fun_value = fit_value;
+}
+
+/*************************************************
+* Function: g9_min
+*
+* Parameters: none (reads globals psos[], cur_pso,
+*   pso_update_pbest_each_cycle_pending; writes
+*   psos[cur_pso].eva_fun_value)
+*
+* Returns: void
+*
+* Description: Min-max evaluation of the 7-variable G9 constrained
+*   benchmark for the x-swarm. The 4 constraint residuals r[] depend
+*   only on x and are computed once before the multiplier loop; each
+*   4-multiplier vector y of the opposing swarm contributes the same
+*   conditional penalty terms as in g1_min/g7_min. Fitness is the max
+*   over all y; the swarm's pbest comparison minimizes it.
+*
+**************************************************/
+static void g9_min (void)
+{
+  int idx_i,idx_j;
+  double x[7]; // decision variables of the current particle
+  double y[4]; // Lagrange multipliers from the opposing swarm
+  double r[4]; // constraint residuals at x
+  double p[4]; // per-constraint penalty terms
+
+  double fit_value;
+  int env_pso;
+  double temp_max,temp_value;
+
+  env_pso = (cur_pso)?0:1; // the opposing (multiplier) swarm
+
+  if (pso_update_pbest_each_cycle_pending)
+  {
+    for (idx_i = 0; idx_i <7 ; idx_i++ )
+    {
+      x[idx_i] = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][idx_i];
+    }
+  }
+  else
+  {
+    for (idx_i = 0; idx_i <7 ; idx_i++ )
+    {
+      x[idx_i] = psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
+    }
+  }
+
+  // constrains
+  r[0] = 2 * x[0] * x[0] + 3 * pow(x[1],4) + x[2] + 4 * x[3] * x[3] + 5 * x[4] - 127;
+  r[1] = 7 * x[0] + 3 * x[1] + 10 * x[2] * x[2] + x[3] - x[4] - 282;
+  r[2] = 23 * x[0] + x[1] * x[1] + 6 * x[5] * x[5] - 8 * x[6] - 196;
+  r[3] = 4 * x[0] * x[0] + x[1] * x[1] - 3 * x[0] * x[1] + 2 * x[2] * x[2] + 5 * x[5] - 11 * x[6];
+
+  for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+  {
+    if (pso_update_pbest_each_cycle_pending)
+    {
+      for (idx_j = 0; idx_j <4 ; idx_j++ )
+      {
+        y[idx_j] = psos[env_pso].pbest_position_values[idx_i][idx_j];
+      }
+    }
+    else
+    {
+      for (idx_j = 0; idx_j <4 ; idx_j++ )
+      {
+        y[idx_j] = psos[env_pso].position_values[idx_i][idx_j];
+      }
+    }
+
+    // G9 objective value at x
+    temp_value = pow((x[0] - 10),2) + 5 * pow((x[1] - 12),2) + pow(x[2],4) + 3 * pow((x[3] - 11),2)
+      + 10 * pow((x[4]),6) + 7 * pow(x[5],2) + pow(x[6],4) - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6];
+
+    for (idx_j = 0; idx_j <4 ; idx_j++ )
+    {
+      if ((r[idx_j]) >= (-y[idx_j]/200.0))
+      {
+        p[idx_j] = y[idx_j] * r[idx_j] + 100 * r[idx_j] * r[idx_j];
+      }
+      else
+      {
+        p[idx_j] = - y[idx_j] * y[idx_j]/400.0;
+      }
+      temp_value += p[idx_j];
+    }
+    if (idx_i == 0)
+    {
+      temp_max = temp_value;
+      fit_value = temp_max;
+    }
+    else
+    {
+      fit_value = max(temp_value,temp_max);
+      temp_max = fit_value;
+    }
+  }
+  psos[cur_pso].eva_fun_value = fit_value;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void g9_best_solution (void)
+{
+ int idx_i;
+ double x[7];
+
+ for (idx_i = 0; idx_i <7 ; idx_i++ )
+ {
+ x[idx_i] = psos[0].pbest_position_values[psos[0].global_best_index][idx_i];
+ }
+
+ // constrains
+ paper_results[0] = 2 * x[0] * x[0] + 3 * pow(x[1],4) + x[2] + 4 * x[3] * x[3] + 5 * x[4] - 127;
+ paper_results[1] = 7 * x[0] + 3 * x[1] + 10 * x[2] * x[2] + x[3] - x[4] - 282;
+ paper_results[2] = 23 * x[0] + x[1] * x[1] + 6 * x[5] * x[5] - 8 * x[6] - 196;
+ paper_results[3] = 4 * x[0] * x[0] + x[1] * x[1] - 3 * x[0] * x[1] + 2 * x[2] * x[2] + 5 * x[5] - 11 * x[6];
+
+ paper_results[4] = pow((x[0] - 10),2) + 5 * pow((x[1] - 12),2) + pow(x[2],4) + 3 * pow((x[3] - 11),2)
+ + 10 * pow((x[4]),6) + 7 * pow(x[5],2) + pow(x[6],4) - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6];
+ counter = 5;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void g9_max (void)
+{
+ int idx_i,idx_j;
+ double x[7];
+ double y[4];
+ double r[4];
+ double p[4];
+
+ double fit_value;
+ int env_pso;
+ double temp_min,temp_value;
+
+ env_pso = (cur_pso)?0:1;
+
+ if (pso_update_pbest_each_cycle_pending)
+ {
+ for (idx_i = 0; idx_i <4 ; idx_i++ )
+ {
+ y[idx_i] = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][idx_i];
+ }
+ }
+ else
+ {
+ for (idx_i = 0; idx_i <4 ; idx_i++ )
+ {
+ y[idx_i] = psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
+ }
+ }
+
+ for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+ {
+ if (pso_update_pbest_each_cycle_pending)
+ {
+ for (idx_j = 0; idx_j <7 ; idx_j++ )
+ {
+ x[idx_j] = psos[env_pso].pbest_position_values[idx_i][idx_j];
+ }
+ }
+ else
+ {
+ for (idx_j = 0; idx_j <7 ; idx_j++ )
+ {
+ x[idx_j] = psos[env_pso].position_values[idx_i][idx_j];
+ }
+ }
+
+ // constrains
+ r[0] = 2 * x[0] * x[0] + 3 * pow(x[1],4) + x[2] + 4 * x[3] * x[3] + 5 * x[4] - 127;
+ r[1] = 7 * x[0] + 3 * x[1] + 10 * x[2] * x[2] + x[3] - x[4] - 282;
+ r[2] = 23 * x[0] + x[1] * x[1] + 6 * x[5] * x[5] - 8 * x[6] - 196;
+ r[3] = 4 * x[0] * x[0] + x[1] * x[1] - 3 * x[0] * x[1] + 2 * x[2] * x[2] + 5 * x[5] - 11 * x[6];
+
+ temp_value = pow((x[0] - 10),2) + 5 * pow((x[1] - 12),2) + pow(x[2],4) + 3 * pow((x[3] - 11),2)
+ + 10 * pow((x[4]),6) + 7 * pow(x[5],2) + pow(x[6],4) - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6];
+
+ for (idx_j = 0; idx_j <4 ; idx_j++ )
+ {
+ if ((r[idx_j]) >= (-y[idx_j]/200.0))
+ {
+ p[idx_j] = y[idx_j] * r[idx_j] + 100 * r[idx_j] * r[idx_j];
+ }
+ else
+ {
+ p[idx_j] = - y[idx_j] * y[idx_j]/400.0;
+ }
+ temp_value += p[idx_j];
+ }
+ if (idx_i == 0)
+ {
+ temp_min = temp_value;
+ fit_value = temp_min;
+ }
+ else
+ {
+ fit_value = min(temp_value,temp_min);
+ temp_min = fit_value;
+ }
+
+ }
+
+ psos[cur_pso].eva_fun_value = fit_value;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void f6 (void)
+{
+ /*
+ This is the f6 function as described in the Handbook of
+ Genetic Algorithms, p.8
+ */
+ double num, denom, f6;
+
+ num = (sin(sqrt((psos[cur_pso].position_values[psos[cur_pso].popu_index][0]*psos[cur_pso].position_values[psos[cur_pso].popu_index][0])+(psos[cur_pso].position_values[psos[cur_pso].popu_index][1]*psos[cur_pso].position_values[psos[cur_pso].popu_index][1])))) *
+ (sin(sqrt((psos[cur_pso].position_values[psos[cur_pso].popu_index][0]*psos[cur_pso].position_values[psos[cur_pso].popu_index][0])+(psos[cur_pso].position_values[psos[cur_pso].popu_index][1]*psos[cur_pso].position_values[psos[cur_pso].popu_index][1])))) - 0.5;
+
+ denom = (1.0 + 0.001 * ((psos[cur_pso].position_values[psos[cur_pso].popu_index][0] * psos[cur_pso].position_values[psos[cur_pso].popu_index][0]) + (psos[cur_pso].position_values[psos[cur_pso].popu_index][1]*psos[cur_pso].position_values[psos[cur_pso].popu_index][1]))) *
+ (1.0 + 0.001 * ((psos[cur_pso].position_values[psos[cur_pso].popu_index][0] * psos[cur_pso].position_values[psos[cur_pso].popu_index][0]) + (psos[cur_pso].position_values[psos[cur_pso].popu_index][1]*psos[cur_pso].position_values[psos[cur_pso].popu_index][1])));
+
+ f6 = (double) 0.5 - (num/denom);
+
+ psos[cur_pso].eva_fun_value = 1 - f6;
+
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void sphere (void)
+{
+ /* This is the familiar sphere model */
+
+ double result;
+ int idx_i;
+
+ result=0.0;
+
+ for (idx_i = 0; idx_i < ( psos[cur_pso].dimension ); idx_i++)
+ {
+ result += psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
+ }
+
+ psos[cur_pso].eva_fun_value = result;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void rosenbrock (void)
+{
+
+ /* this is the Rosenbrock function */
+
+ int idx_i;
+ double result;
+
+ result=0.0;
+
+ for (idx_i = 1; idx_i < ( psos[cur_pso].dimension ); idx_i++)
+ {
+ result += 100.0*(psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] - psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1]) * (psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] - psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1]) + (psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1]-1) * (psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1]-1);
+ }
+
+ psos[cur_pso].eva_fun_value = fabs(result);
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void rastrigrin (void)
+{
+ /* This is the generalized Rastrigrin function */
+
+ int idx_i;
+ double result;
+
+ result=0.0;
+
+ for (idx_i = 0;idx_i < ( psos[cur_pso].dimension ); idx_i++)
+ {
+ result +=psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] - 10.0*cos(2.0*3.141591 * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i])+10.0;
+ }
+ psos[cur_pso].eva_fun_value = result;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void griewank (void)
+{
+ /* This is the generalized Griewank function */
+
+ int idx_i;
+ double result_s,result_p;
+
+ result_s=0.0;
+ result_p=1.0;
+
+ for (idx_i = 0; idx_i < ( psos[cur_pso].dimension ); idx_i++)
+ {
+ result_s +=psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
+ result_p *=cos(psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i]/sqrt(idx_i+1));
+ }
+ psos[cur_pso].eva_fun_value = result_s/4000.0 - result_p +1;
+}
+
+/*************************************************/
+/* Inertia Weight Update Functions */
+/*************************************************/
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void iw_update_methods (int iw_type)
+{
+ switch (iw_type)
+ {
+ case CONSTANT_IW:
+ constant_iw();
+ break;
+ case LINEAR_IW:
+ linear_iw();
+ break;
+ case FUZZY_TWO_IW:
+ fuzzy_two_iw();
+ break;
+ case FUZZY_FOUR_IW:
+ fuzzy_four_iw();
+ break;
+ case NOISE_ADDITION_IW:
+ noise_addition_iw();
+ break;
+ default:
+ break;
+ }
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void constant_iw (void)
+{
+ psos[cur_pso].inertia_weight = psos[cur_pso].init_inertia_weight;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void linear_iw (void)
+{
+ int total_gen,cur_index;
+
+ total_gen = total_cycle_of_PSOs * psos[cur_pso].env_data.max_generation;
+ cur_index = pso_cycle_index * psos[cur_pso].env_data.max_generation + psos[cur_pso].gene_index;
+
+ psos[cur_pso].inertia_weight = ((psos[cur_pso].init_inertia_weight ) - 0.4 ) * ( total_gen - cur_index) / total_gen + 0.4 + ((rand()%600)/1000.0) - 0.3;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void fuzzy_two_iw (void)
+{
+
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void fuzzy_four_iw (void)
+{
+
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void noise_addition_iw (void)
+{
+ psos[cur_pso].inertia_weight = psos[cur_pso].init_inertia_weight + ((rand()%600)/1000.0) - 0.3 ;
+}
+
+/*************************************************/
+/* Initizalization Functions */
+/*************************************************/
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description: Symmetry Initialization
+*
+**************************************************/
+static void PSO_initialize_handler (int init_type)
+{
+ switch (init_type)
+ {
+ case PSO_RANDOM_SYMMETRY_INITIALIZE:
+ PSO_random_symmetry_initialize();
+ break;
+ case PSO_RANDOM_ASYMMETRY_INITIALIZE:
+ PSO_random_asymmetry_initialize();
+ break;
+ default:
+ break;
+ }
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description: Symmetry Initialization
+*
+**************************************************/
+static void PSO_random_symmetry_initialize (void)
+{
+ int b;
+ for (b=0;b<(psos[cur_pso].dimension);b++)
+ {
+ if (psos[cur_pso].env_data.boundary_flag)
+ {
+ (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b] = (float)((psos[cur_pso].env_data.up_boundaries[b]) - (psos[cur_pso].env_data.low_boundaries[b] ) ) * ((rand()%1000)/1000.0) + (psos[cur_pso].env_data.low_boundaries[b] );
+ psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b];
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = 0.5* ((psos[cur_pso].env_data.up_boundaries[b]) - (psos[cur_pso].env_data.low_boundaries[b] ) ) * ((rand()%1000)/1000.0);
+ }
+ else
+ {
+ ((psos[cur_pso].position_values)[psos[cur_pso].popu_index][b] ) = (float) (((psos[cur_pso].env_data.init_range.right) - (psos[cur_pso].env_data.init_range.left ))*((rand()%1000)/1000.0) + (psos[cur_pso].env_data.init_range.left ));
+ psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b];
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].env_data.max_velocity)*((rand()%1000)/1000.0);
+ }
+ if (((rand()%1000)/1000.0) > 0.5)
+ {
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = -(psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] );
+ }
+ }
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description: Asymmetry initialization
+*
+**************************************************/
+static void PSO_random_asymmetry_initialize (void)
+{
+ int b;
+ for (b=0;b<(psos[cur_pso].dimension);b++)
+ {
+ if (psos[cur_pso].env_data.boundary_flag)
+ {
+ (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b] = (float)((psos[cur_pso].env_data.up_boundaries[b]) - (psos[cur_pso].env_data.low_boundaries[b] ) ) * ((rand()%1000)/1000.0) + (psos[cur_pso].env_data.low_boundaries[b] );
+ psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b];
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = 0.5* ((psos[cur_pso].env_data.up_boundaries[b]) - (psos[cur_pso].env_data.low_boundaries[b] ) ) * ((rand()%1000)/1000.0);
+ }
+ else
+ {
+ ((psos[cur_pso].position_values)[psos[cur_pso].popu_index][b] ) = (float) (((psos[cur_pso].env_data.init_range.right) - (psos[cur_pso].env_data.init_range.left ))*((rand()%1000)/1000.0) + (psos[cur_pso].env_data.init_range.left ));
+ psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b];
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].env_data.max_velocity)*((rand()%1000)/1000.0);
+ }
+ if (((rand()%1000)/1000.0) > 0.5)
+ {
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = -(psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] );
+ }
+ }
+}
diff --git a/pso_nn/psostate.h b/pso_nn/psostate.h
new file mode 100644
index 0000000..692f55b
--- /dev/null
+++ b/pso_nn/psostate.h
@@ -0,0 +1,133 @@
+#ifndef PSO_STATE_H
+#define PSO_STATE_H
+
+#include "definiti.h"
+
+/**************************************/
+/* Constants and Macros */
+/**************************************/
+
+/**************************************/
+/* Enumerations */
+/**************************************/
+
+typedef enum PSO_State_Tag
+{
+ //PSO_INITIALIZE, // Initialize the population
+ PSO_UPDATE_INERTIA_WEIGHT, // Update inertia weight
+ PSO_EVALUATE, // Evaluate partiles
+ PSO_UPDATE_GLOBAL_BEST, // Update global best
+ PSO_UPDATE_LOCAL_BEST, // Update local best
+ PSO_UPDTAE_VELOCITY, // Update particle's velocity
+ PSO_UPDATE_POSITION, // Update particle's position
+ PSO_GOAL_REACH_JUDGE, // Judge whether reach the goal
+ PSO_NEXT_GENERATION, // Move to the next generation
+ PSO_UPDATE_PBEST_EACH_CYCLE, // Update pbest each cycle for co-pso due to the environment changed
+ PSO_NEXT_PSO, // Move to the next PSO in the same cycle or the first pso in the next cycle
+ PSOS_DONE, // Finish one cycle of PSOs
+ NUM_PSO_STATES // Total number of PSO states
+} PSO_State_Type;
+
+typedef enum PSO_Initialize_Tag
+{
+ PSO_RANDOM_SYMMETRY_INITIALIZE, // 0 :Symmetry Initialization
+ PSO_RANDOM_ASYMMETRY_INITIALIZE, // 1 :Symmetry Initialization
+ NUM_PSO_INITIALIZE // Number of initialization methods
+} PSO_Initialize_Type;
+
+typedef enum MINMAX_Tag
+{
+ MINIMIZATION, // 0 :Minimization problem
+ MAXIMIZATION // 1 :Maximization problem
+} MINMAX_Type;
+
+typedef enum Evaluate_Function_Tag
+{
+ SIMPLE_MIN, // 0 :Simple min function
+ SIMPLE_MAX, // 1 :Simple Max function
+ MICHALEWICZ_MIN, // 2 :augmented lagrangian 1 from Michalewicz 94 paper, min part
+ MICHALEWICZ_MAX, // 3 :augmented lagrangian 1 from Michalewicz 94 paper, max part
+ MICHALEWICZ_2_MIN, // 4 :augmented lagrangian 2 from Michalewicz 94 paper, min part
+ MICHALEWICZ_2_MAX, // 5 :augmented lagrangian 2 from Michalewicz 94 paper, max part
+ G1_MIN, // 6 :G1 from Tahk and Sun's IEEE Trans EC paper, min part
+ G1_MAX, // 7 :G2 from Tahk and Sun's IEEE Trans EC paper, max part
+ G7_MIN, // 8 :G7 from Tahk and Sun's IEEE Trans EC paper, min part
+ G7_MAX, // 9 :G7 from Tahk and Sun's IEEE Trans EC paper, max part
+ G9_MIN, // 10 :G9 from Tahk and Sun's IEEE Trans EC paper, min part
+ G9_MAX, // 11 :G9 from Tahk and Sun's IEEE Trans EC paper, max part
+ F6, // 12 :F6: min
+ SPHERE, // 13 :Sphere: min
+ ROSENBROCK, // 14 :Rosenbrock: min
+ RASTRIGRIN, // 15 :Rastrigrin: min
+ GRIEWANK, // 16 :Griewank: min
+ BP_MAX, // 17 :BP net maximum problem
+ NUM_EVALUATE_FUNCTIONS // Total number of evaluation functions
+} Evaluate_Function_Type;
+
+
+typedef enum Inertia_Weight_Update_Method_Tag
+{
+ CONSTANT_IW, // 0 :constant inertia weight
+ LINEAR_IW, // 1 :Linearly decreasing inertia weight
+ FUZZY_TWO_IW, // 2 :Applying fuzzy system with two inputs
+ FUZZY_FOUR_IW, // 3 :Applying fuzzy system with four inputs
+ NOISE_ADDITION_IW, // 4 :Adding nosie to the constant inertia weight
+ NUM_IW_UPDATE_METHODS // Number of inertia weight update methods
+} IW_Update_Type;
+
+/**************************************/
+/* Structures */
+/**************************************/
+typedef struct PSO_Initizlize_Range_Type_Tag
+{
+ float left;
+ float right;
+} PSO_Initizlize_Range_Type;
+
+typedef struct PSO_Environment_Type_Tag // PSO working condition
+{
+ MINMAX_Type opti_type;
+ Evaluate_Function_Type function_type;
+ IW_Update_Type iw_method;
+ PSO_Initialize_Type init_type;
+ PSO_Initizlize_Range_Type init_range;
+ float max_velocity;
+ float max_position;
+ int max_generation;
+ int boundary_flag; // 1: boundary; 0: no boundary
+ FVECTOR low_boundaries;
+ FVECTOR up_boundaries;
+} PSO_Environment_Type;
+
+typedef struct PSO_Type_Tag // PSO parameters
+{
+ PSO_Environment_Type env_data;
+ int popu_size;
+ int dimension;
+ float inertia_weight;
+ float init_inertia_weight;
+ int global_best_index;
+ FVECTOR pbest_values;
+ FMATRIX velocity_values;
+ FMATRIX position_values;
+ FMATRIX pbest_position_values;
+ float eva_fun_value; // value obtained from evaluatation for current individual
+ int popu_index;
+ int gene_index;
+} PSO_Type;
+
+
+/**************************************/
+/* Global and Const Variable */
+/**************************************/
+
+/**************************************/
+/* Function Prototypes */
+/**************************************/
+extern void PSO_Main_Loop(void);
+extern void PSO_Start_Up(char *dataFile);
+extern void PSO_Clean_Up(void);
+
+#define min(x,y) ((x) < (y) ? (x) : (y))
+#define max(x,y) ((x) > (y) ? (x) : (y))
+#endif
diff --git a/pso_nn/sigmoid.c b/pso_nn/sigmoid.c
new file mode 100644
index 0000000..3c3683e
--- /dev/null
+++ b/pso_nn/sigmoid.c
@@ -0,0 +1,16 @@
+#include "math.h"
+#include "sigmoid.h"
+
+#define Beta 1
+
+double
+sigmoid(double x)
+{
+ x=-1*Beta*x;
+ x=exp(x);
+ x+=1.0;
+ x=1/x;
+ return(x);
+}
+
+
diff --git a/pso_nn/sigmoid.h b/pso_nn/sigmoid.h
new file mode 100644
index 0000000..5e06274
--- /dev/null
+++ b/pso_nn/sigmoid.h
@@ -0,0 +1,6 @@
+#ifndef __SIGMOID_H_
+#define __SIGMOID_H_
+
+double sigmoid(double x);
+
+#endif
|
btbytes/ci
|
59ba4d911be8269a1d64974561783c5ade748b9c
|
adding Self-organizing feature map neural network
|
diff --git a/sofm/Makefile b/sofm/Makefile
new file mode 100644
index 0000000..adb0445
--- /dev/null
+++ b/sofm/Makefile
@@ -0,0 +1,3 @@
+all: definiti.h headfile.h mem_loc.h nnet.h sofm.h
+ gcc -Wall -lm main.c mem_loc.c sofm.c -o sofm
+
diff --git a/sofm/definiti.h b/sofm/definiti.h
new file mode 100644
index 0000000..e35e944
--- /dev/null
+++ b/sofm/definiti.h
@@ -0,0 +1,14 @@
+#ifndef DEFINITION_H
+#define DEFINITION_H
+
+typedef enum BOOLEAN_Tag {FALSE, TRUE} BOOLEAN;
+
+typedef int *P_INT;
+typedef P_INT IVECTOR;
+typedef P_INT *IMATRIX;
+
+typedef float *P_FLOAT;
+typedef P_FLOAT FVECTOR;
+typedef P_FLOAT *FMATRIX;
+
+#endif
diff --git a/sofm/headfile.h b/sofm/headfile.h
new file mode 100644
index 0000000..cca8e6e
--- /dev/null
+++ b/sofm/headfile.h
@@ -0,0 +1,10 @@
+#ifndef __HEADFILE_H__
+#define __HEADFILE_H__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <string.h>
+#include <time.h> //YS 01/16/98
+
+#endif
diff --git a/sofm/main.c b/sofm/main.c
new file mode 100644
index 0000000..fd48776
--- /dev/null
+++ b/sofm/main.c
@@ -0,0 +1,41 @@
+#include "headfile.h"
+#include "sofm.h"
+#include "mem_loc.h"
+
+/**************************************/
+/* Constants and Macros */
+/**************************************/
+
+static void main_start_up(char *dataFile);
+static void main_clean_up(void);
+
+void main (int argc, char *argv[])
+{
+ // check command line
+ if (argc != 2)
+ {
+ printf("Usage: exe_file run_file\n");
+ exit(1);
+ }
+
+ // initialize
+ main_start_up(argv[1]);
+
+ // run
+ SOFM_Main_Loop();
+
+ // clean up memory space
+ main_clean_up();
+
+}
+
+static void main_start_up (char *dataFile)
+{
+ SOFM_Start_Up(dataFile);
+
+}
+
+static void main_clean_up (void)
+{
+ SOFM_Clean_Up();
+}
diff --git a/sofm/mem_loc.c b/sofm/mem_loc.c
new file mode 100644
index 0000000..81d35c4
--- /dev/null
+++ b/sofm/mem_loc.c
@@ -0,0 +1,98 @@
+#include "headfile.h"
+#include "mem_loc.h"
+
+/**********************************************************************
+ If you want to allocate a block larger than 64K, you must use
+ farcalloc instead of calloc
+**********************************************************************/
+
+/* Memory allocation functions for integer matrix and vector */
+
+void IVectorAllocate(IVECTOR *ivector, int nCols)
+{
+ if ((*ivector = (IVECTOR) calloc(nCols, sizeof(long int))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for vector\n");
+ exit(1);
+ }
+}
+
+/* Allocate space for columns (int cells) for
+ dynamic two dimensional matrix[rows][cols]
+*/
+
+void IAllocateCols(P_INT imatrix[], int nRows, int nCols)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ IVectorAllocate(&imatrix[i], nCols);
+}
+
+/* Allocate space for a two dimensional dynamic matrix [rows] [cols]
+*/
+
+void IMatrixAllocate(IMATRIX *ipmatrix, int nRows, int nCols)
+{
+ if ( (*ipmatrix = (IMATRIX) calloc(nRows, sizeof(long int) ) ) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for matrix\n");
+ exit(1);
+ }
+
+ IAllocateCols(*ipmatrix, nRows, nCols);
+}
+
+/* free space for two dimensional dynamic array */
+void IMatrixFree(IMATRIX imatrix, int nRows)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ free(imatrix[i]);
+ free(imatrix);
+}
+
+/* *************** Float routines *************** */
+
+void FVectorAllocate(FVECTOR *fvector, int nCols)
+{
+ if ((*fvector = (FVECTOR) calloc(nCols, sizeof(float))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for fvector\n");
+ exit(1);
+ }
+}
+
+/* Allocate space for columns (int cells) for
+ dynamic two dimensional matrix[rows][cols]
+*/
+
+void FAllocateCols(P_FLOAT fmatrix[], int nRows, int nCols)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ FVectorAllocate(&fmatrix[i], nCols);
+}
+
+/* Allocate space for a two dimensional dynamic matrix [rows] [cols]
+*/
+
+void FMatrixAllocate(FMATRIX *fpmatrix, int nRows, int nCols)
+{
+ if ( (*fpmatrix = (FMATRIX) calloc(nRows, sizeof(float))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for fmatrix\n");
+ exit(1);
+ }
+
+ FAllocateCols(*fpmatrix, nRows, nCols);
+}
+
+/* free space for two dimensional dynamic array */
+void FMatrixFree(FMATRIX fmatrix, int nRows)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ free(fmatrix[i]);
+ free(fmatrix);
+}
+
diff --git a/sofm/mem_loc.h b/sofm/mem_loc.h
new file mode 100644
index 0000000..e79b2cd
--- /dev/null
+++ b/sofm/mem_loc.h
@@ -0,0 +1,17 @@
+#ifndef __MEM_LOC_H__
+#define __MEM_LOC_H__
+
+#include "definiti.h"
+
+extern void IVectorAllocate(IVECTOR *ivector, int nCols);
+extern void IMatrixFree(IMATRIX imatrix, int nRows);
+extern void IMatrixAllocate(IMATRIX *ipmatrix, int nRows, int nCols);
+extern void IAllocateCols(P_INT imatrix[], int nRows, int nCols);
+
+extern void FVectorAllocate(FVECTOR *fvector, int nCols);
+extern void FMatrixFree(FMATRIX fmatrix, int nRows);
+extern void FMatrixAllocate(FMATRIX *fpmatrix, int nRows, int nCols);
+extern void FAllocateCols(P_FLOAT fmatrix[], int nRows, int nCols);
+
+#endif
+
diff --git a/sofm/nnet.h b/sofm/nnet.h
new file mode 100644
index 0000000..343ff1e
--- /dev/null
+++ b/sofm/nnet.h
@@ -0,0 +1,32 @@
+#ifndef NEURAL_NET_H
+#define NEURAL_NET_H
+
+#include "definiti.h"
+
+/**************************************************************/
+/* Enumerations */
+/**************************************************************/
+typedef enum NN_Operation_Mode_Type_Tag
+{
+ NN_TRAINING,
+ NN_RECALL,
+ NUM_BP_OPERATION_MODES
+} NN_Operation_Mode_Type;
+
+typedef enum NN_Function_Type_Tag
+{
+ NN_LINEAR_FUNCTION,
+ NN_GAUSIAN_FUNCTION,
+ NN_SIGMOID_FUNCTION,
+ NUM_NN_FUNCTION_TYPES
+} NN_Function_Type;
+
+typedef enum NN_Layer_Type_Tag
+{
+ NN_INPUT_LAYER,
+ NN_HIDDEN_LAYER,
+ NN_OUTPUT_LAYER,
+ NUM_NN_LAYERS
+} NN_Layer_Type;
+
+#endif
diff --git a/sofm/sofm.c b/sofm/sofm.c
new file mode 100644
index 0000000..d3bf20a
--- /dev/null
+++ b/sofm/sofm.c
@@ -0,0 +1,1262 @@
+#include "headfile.h"
+#include "definiti.h"
+#include "sofm.h"
+#include "mem_loc.h"
+
+#define MAX_NUM_CHARS 100
+#define NEIGHBORHODD_DISTANCE 1
+
+#define SAVE_CLUSTER 1
+/**************************************************************/
+/* Static Variable and Const Variable with File level scope */
+/**************************************************************/
+ static SOFM_Type sofm;
+ static SOFM_Pattern_Set_Type patset;
+ static SOFM_State_Type sofm_cur_state;
+
+ #if SAVE_CLUSTER
+ static IMATRIX cluster_result;
+ #endif
+
+ static char pat_file_name[MAX_NUM_CHARS];
+ static float *target_out;
+ static IMATRIX test_result;
+ static SOFM_2D_Size_Type ite_per_update_neighbor;
+ static SOFM_2D_Size_Type slab_out_size;
+/**************************************************************/
+/* Function Prototypes for functions with file level scope */
+/**************************************************************/
+static void read_sofm_parameters(char *dataFile); // read SOFM parameters from I/O file
+static void allocate_sofm_memory(void); // allocate SOFM memory spaces
+static void free_sofm_memory(void); // free SOFM memory spaces
+static void store_sofm_results(void); // write SOFM results to I/O file
+
+static void sofm_initialize(void);
+static void sofm_initialize_weights(void);
+static void sofm_state_handler(int); // SOFM state handle routine
+
+static void sofm_get_pattern(void);
+static void sofm_weight_normalization(void);
+static void sofm_feedforward_input(void);
+static void sofm_feedforward_output(void);
+static void sofm_winning_neuron(void);
+static void sofm_update_neighborhood(void);
+//static void sofm_weight_step_change(void);
+static void sofm_weight_change(void);
+static void sofm_next_pattern(void);
+static void sofm_next_iteration(void);
+static void sofm_update_learning_rate(void);
+static void sofm_update_conscience_factor(void);
+static void sofm_training_done(void);
+static void sofm_recall_done(void);
+
+static float activate_function(float, int);
+static float nn_linear_function(float);
+static float nn_gausian_function(float);
+static float nn_sigmoid_function(float);
+
+static void print_net_parameters(void);
+static void update_recall_result(void);
+
+static float neighbor_func(int,int);
+static float chef_hat(int);
+static float mexican_hat(int);
+static float stovepipe_hat(int);
+
+/**************************************************************/
+/* Function Definitions */
+/**************************************************************/
+
+
+/**************************************************************/
+/* SOFM Start and clean routines and interfaces */
+/**************************************************************/
+
+/**************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+***************************************************************/
+
+void SOFM_Start_Up (char *dataFile)
+{
+ read_sofm_parameters(dataFile);
+ allocate_sofm_memory(); // allocate memory for SOFM
+ sofm_initialize();
+}
+
+/*************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+void SOFM_Clean_Up (void)
+{
+ store_sofm_results();
+ free_sofm_memory(); // free memory space of SOFM
+}
+
+
+/************************************************************/
+/* SOFM functons */
+/************************************************************/
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+void SOFM_Main_Loop (void)
+{
+ BOOLEAN running;
+
+ running = TRUE;
+ while (running)
+ {
+ if ((sofm_cur_state == SOFM_TRAINING_DONE) || (sofm_cur_state == SOFM_RECALL_DONE) )
+ {
+ running = FALSE;
+ }
+ sofm_state_handler(sofm_cur_state);
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void read_sofm_parameters (char *dataFile)
+{
+ FILE *frun;
+
+ // open the runfile to input parameters
+ if ((frun=fopen(dataFile,"r"))==NULL)
+ {
+ printf("Cant read run file");
+ exit(1);
+ }
+
+ // read SOFM's parameters from run file
+ // read SOFM's environment data
+ fscanf(frun, "%d",&(sofm.env.operation_mode)); // training or recall
+ fscanf(frun, "%d",&(sofm.env.train_mode)); // training mode if in training operation mode
+ fscanf(frun, "%f",&(sofm.env.eta)); // learning rate
+ fscanf(frun, "%f",&(sofm.env.shrink)); // learning rate shrinking coefficient
+ fscanf(frun, "%f",&(sofm.env.gama)); // bias factor
+ fscanf(frun, "%f",&(sofm.env.beta)); // beta
+ fscanf(frun, "%f",&(sofm.env.criterion)); // training error criterion for termination
+ fscanf(frun, "%d",&(sofm.env.max_ite)); // maximum number of generations
+ fscanf(frun, "%d",&(sofm.env.max_tra_pat)); // total number of training patterns
+ fscanf(frun, "%d",&(sofm.env.conscience)); // 0: no conscience, 1: conscience
+ fscanf(frun, "%d",&(sofm.env.neighbor.width)); // initial width of neighborhood
+ fscanf(frun, "%d",&(sofm.env.neighbor.height)); // initial height of neighborhood
+
+ // read SOFM's Arch
+ fscanf(frun, "%d",&(slab_out_size.height)); // out slab height
+ fscanf(frun, "%d",&(slab_out_size.width)); // out slab width
+ fscanf(frun, "%d",&(sofm.env.neighbor_function)); // neighborhood function
+
+ // read in/out pattern parameters
+ fscanf(frun, "%d",&(patset.size)); // number of pattern
+ // read pattern input dimension
+ fscanf(frun, "%d",&(patset.dim_in)); // input dimension
+ // read pattern output dimension
+ fscanf(frun, "%d",&(patset.dim_out)); // output dimension
+ // read pattern file name
+ fscanf(frun, "%s", pat_file_name); // pattern file name
+
+ fclose(frun);
+
+ print_net_parameters();
+}
+
+static void print_net_parameters (void)
+{
+ // test
+ printf( "%d\n",(sofm.env.operation_mode)); // training or recall
+ printf( "%d\n",(sofm.env.train_mode));
+ printf( "%f\n",(sofm.env.eta)); // learning rate
+ printf( "%f\n",(sofm.env.shrink)); // learning rate shrinking coefficient
+ printf( "%f\n",(sofm.env.gama)); // bias factor
+ printf( "%f\n",(sofm.env.beta)); //
+ printf( "%f\n",(sofm.env.criterion)); // training error criterion for termination
+ printf( "%d\n",(sofm.env.max_ite)); // maximum number of generations
+ printf( "%d\n",(sofm.env.max_tra_pat)); // total number of training patterns
+ printf( "%d\n",(sofm.env.conscience));
+ printf( "%d\n",(sofm.env.neighbor.width)); // initial width of neighborhood
+ printf( "%d\n",(sofm.env.neighbor.height)); // initial height of neighborhood
+ printf( "%d\n",(slab_out_size.height));
+ printf( "%d\n",(slab_out_size.width));
+ printf( "%d\n\n",(sofm.env.neighbor_function));
+ printf( "%d\n",(patset.size)); // number of pattern
+ // read pattern input dimension
+ printf( "%d\n",(patset.dim_in)); // input dimension
+ // read pattern output dimension
+ printf( "%d\n",(patset.dim_out)); // output dimension
+ // read pattern file name
+ printf( "%s\n", pat_file_name);
+}
+
+static void update_recall_result(void)
+{
+ int idx_out;
+
+ if (patset.dim_out > 0)
+ {
+ for (idx_out = 0; idx_out < patset.dim_out; idx_out++)
+    { // loop through the neurons of the output layer
+ if (target_out[idx_out] > 0.1)
+ {
+ #if SAVE_CLUSTER
+ cluster_result[sofm.env.cur_pat][2] = idx_out;
+ #endif
+ test_result[sofm.env.winner.height * sofm.arch.slabs[1].size.width + sofm.env.winner.width][idx_out]++;
+ }
+ }
+ }
+ else
+ {
+ test_result[sofm.env.winner.height * sofm.arch.slabs[1].size.width + sofm.env.winner.width][0]++;
+ }
+ #if SAVE_CLUSTER
+ cluster_result[sofm.env.cur_pat][0] = sofm.env.winner.height;
+ cluster_result[sofm.env.cur_pat][1] = sofm.env.winner.width;
+ #endif
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void allocate_sofm_memory (void)
+{
+ int idx,idx_h;
+
+ // allocate memory for SOFM net
+ sofm.arch.size = 2;
+ sofm.arch.slabs = calloc(sofm.arch.size,sizeof(NN_Slab_Arch_Type));
+
+ //allocate memory for input layer
+ sofm.arch.slabs[0].size.width = patset.dim_in;
+ sofm.arch.slabs[0].size.height = 1;
+  sofm.arch.slabs[0].neurons = (Neuron_Type**)calloc(sofm.arch.slabs[0].size.height,sizeof(Neuron_Type*));
+ for (idx_h = 0; idx_h < sofm.arch.slabs[0].size.height; idx_h++)
+ {
+ sofm.arch.slabs[0].neurons[idx_h] = (Neuron_Type*)calloc(sofm.arch.slabs[0].size.width,sizeof(Neuron_Type));
+ }
+
+ sofm.arch.slabs[0].slab_type = NN_INPUT_LAYER;
+ // specify and allocate memory for neurons of input layer
+ for (idx = 0 ; idx < sofm.arch.slabs[0].size.width ; idx++)
+ {
+ for (idx_h = 0 ; idx_h < sofm.arch.slabs[0].size.height ; idx_h++)
+ {
+ sofm.arch.slabs[0].neurons[idx_h][idx].neuron_function = NN_LINEAR_FUNCTION;
+ FVectorAllocate(&(sofm.arch.slabs[0].neurons[idx_h][idx].delta_w),1);
+ FVectorAllocate(&(sofm.arch.slabs[0].neurons[idx_h][idx].w),1);
+ }
+ }
+
+ // allocate memory for output layer
+ sofm.arch.slabs[sofm.arch.size - 1].size.width = slab_out_size.width;
+ sofm.arch.slabs[sofm.arch.size - 1].size.height = slab_out_size.height;
+
+  sofm.arch.slabs[sofm.arch.size - 1].neurons = (Neuron_Type**)calloc(sofm.arch.slabs[sofm.arch.size - 1].size.height,sizeof(Neuron_Type*));
+ for (idx_h = 0; idx_h < sofm.arch.slabs[sofm.arch.size - 1].size.height; idx_h++)
+ {
+ sofm.arch.slabs[sofm.arch.size - 1].neurons[idx_h] = (Neuron_Type*)calloc(sofm.arch.slabs[sofm.arch.size - 1].size.width,sizeof(Neuron_Type));
+ }
+ sofm.arch.slabs[sofm.arch.size - 1].slab_type = NN_OUTPUT_LAYER;
+ // specify and allocate memory for neurons of output layer
+ for (idx =0 ; idx < sofm.arch.slabs[sofm.arch.size - 1].size.width ; idx++)
+ {
+ for (idx_h = 0 ; idx_h < sofm.arch.slabs[sofm.arch.size - 1].size.height ; idx_h++)
+ {
+ sofm.arch.slabs[sofm.arch.size - 1].neurons[idx_h][idx].neuron_function = NN_LINEAR_FUNCTION;
+ sofm.arch.slabs[sofm.arch.size - 1].neurons[idx_h][idx].c_f = 1.0/(sofm.arch.slabs[sofm.arch.size - 1].size.height * sofm.arch.slabs[sofm.arch.size - 1].size.width);
+ sofm.arch.slabs[sofm.arch.size - 1].neurons[idx_h][idx].b_v = 0.0;
+ sofm.arch.slabs[sofm.arch.size - 1].neurons[idx_h][idx].w_s = 0; // no winner at beginning
+ FVectorAllocate(&(sofm.arch.slabs[sofm.arch.size - 1].neurons[idx_h][idx].delta_w),sofm.arch.slabs[sofm.arch.size - 2].size.width);
+ FVectorAllocate(&(sofm.arch.slabs[sofm.arch.size - 1].neurons[idx_h][idx].w),sofm.arch.slabs[sofm.arch.size - 2].size.width);
+ }
+ }
+
+ // allocate memory for pattern set
+ FMatrixAllocate(&(patset.patterns),patset.size,(patset.dim_in + patset.dim_out));
+
+ #if SAVE_CLUSTER
+ // allocate memory for storing cluster information for each pattern
+ if (sofm.env.operation_mode == NN_RECALL)
+ { // allocate for test result storage
+ if (patset.dim_out == 0)
+ {
+ IMatrixAllocate(&(cluster_result),patset.size,2);
+ }
+ else
+ {
+ IMatrixAllocate(&(cluster_result),patset.size,3);
+ }
+ }
+ #endif
+
+ // allocate memory for target output
+ if (patset.dim_out > 0)
+ {
+ target_out = calloc(patset.dim_out,sizeof(float));
+ }
+ if (sofm.env.operation_mode == NN_RECALL)
+ { // allocate for test result storage
+ if (patset.dim_out == 0)
+ {
+ IMatrixAllocate(&(test_result),(sofm.arch.slabs[1].size.height * sofm.arch.slabs[1].size.width),1);
+ }
+ else
+ {
+ IMatrixAllocate(&(test_result),(sofm.arch.slabs[1].size.height * sofm.arch.slabs[1].size.width),patset.dim_out);
+ }
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void free_sofm_memory (void)
+{
+ int idx,idx_h;
+ if (sofm.env.operation_mode == NN_RECALL)
+ { // allocate for test result storage
+ IMatrixFree(test_result,(sofm.arch.slabs[1].size.height * sofm.arch.slabs[1].size.width));
+  #if SAVE_CLUSTER
+  IMatrixFree(cluster_result,patset.size);
+ #endif
+ }
+ // free memory for pattern set
+ FMatrixFree(patset.patterns,patset.size);
+
+ // free memory for SOFM net
+ // free memory for output layer
+ for (idx =0 ; idx < sofm.arch.slabs[sofm.arch.size - 1].size.width ; idx++)
+ {
+ for (idx_h = 0 ; idx_h < sofm.arch.slabs[sofm.arch.size - 1].size.height ; idx_h++)
+ {
+ free(sofm.arch.slabs[sofm.arch.size - 1].neurons[idx_h][idx].w);
+ free(sofm.arch.slabs[sofm.arch.size - 1].neurons[idx_h][idx].delta_w);
+ }
+ }
+ for (idx =0 ; idx < sofm.arch.slabs[sofm.arch.size - 1].size.height ; idx++)
+ {
+ free(sofm.arch.slabs[sofm.arch.size - 1].neurons[idx]);
+ }
+ free(sofm.arch.slabs[sofm.arch.size - 1].neurons);
+
+ // free memory for input layer
+ for (idx =0 ; idx < sofm.arch.slabs[0].size.width ; idx++)
+ {
+ for (idx_h = 0 ; idx_h < sofm.arch.slabs[0].size.height ; idx_h++)
+ {
+ free(sofm.arch.slabs[0].neurons[idx_h][idx].w);
+ free(sofm.arch.slabs[0].neurons[idx_h][idx].delta_w);
+ }
+ }
+ for (idx =0 ; idx < sofm.arch.slabs[0].size.height ; idx++)
+ {
+ free(sofm.arch.slabs[0].neurons[idx]);
+ }
+ free(sofm.arch.slabs[0].neurons);
+
+ free(sofm.arch.slabs);
+ if (patset.dim_out >0)
+ {
+ free(target_out);
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_initialize (void)
+{
+ int idx_i,idx_j;
+ int stime;
+ long ltime;
+ FILE *frun;
+
+ // get the current calendar time
+ ltime = time(NULL);
+ stime = (unsigned) ltime/2;
+ srand(stime);
+
+ // sofm read patterns from file
+ // open the pattern file to obtain in/out patterns
+ if ((frun=fopen(pat_file_name,"r"))==NULL)
+ {
+ printf("Cant read pattern file");
+ exit(1);
+ }
+
+ for (idx_i = 0; idx_i < patset.size; idx_i++)
+ {
+ for (idx_j = 0; idx_j < (patset.dim_in + patset.dim_out) ; idx_j++)
+ {
+ fscanf(frun, "%f",&(patset.patterns[idx_i][idx_j]));
+ }
+ }
+ fclose(frun);
+
+ sofm.env.winner.height = 0;
+ sofm.env.winner.width = 0;
+
+ ite_per_update_neighbor.height = sofm.env.max_ite/(sofm.env.neighbor.height + 1);
+ ite_per_update_neighbor.width = sofm.env.max_ite/(sofm.env.neighbor.width + 1);
+
+ // sofm randomly initialize weights
+ sofm_initialize_weights();
+
+ // sofm initial setting
+ sofm_cur_state = SOFM_GET_PATTERN;
+ sofm.env.cur_ite = 0;
+ sofm.env.cur_pat = 0;
+ sofm.env.pat_counter = 0;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_initialize_weights (void)
+{
+ int idx_neuron_w,idx_neuron_h,idx_weight;
+ float prob;
+ int idx_cn_h, idx_cn_w,idx_pn;
+ FILE *fout;
+
+ if (sofm.env.operation_mode == NN_TRAINING)
+ {
+ // initialize weight for the neurons in the output layer
+ for (idx_weight = 0; idx_weight < (sofm.arch.slabs[sofm.arch.size - 2].size.width); idx_weight++ )
+ {
+ for (idx_neuron_h = 0; idx_neuron_h < (sofm.arch.slabs[sofm.arch.size - 1].size.height); idx_neuron_h++)
+ {
+ for (idx_neuron_w = 0; idx_neuron_w < (sofm.arch.slabs[sofm.arch.size - 1].size.width); idx_neuron_w++)
+ {
+ prob = ((rand()%1000)/1000.0) * 0.2;
+ sofm.arch.slabs[sofm.arch.size - 1].neurons[idx_neuron_h][idx_neuron_w].w[idx_weight] = prob + 0.4; // (0.4, 0.6)
+ sofm.arch.slabs[sofm.arch.size - 1].neurons[idx_neuron_h][idx_neuron_w].delta_w[idx_weight] = 0.0;
+ }
+ }
+ }
+ }
+ else
+ { // RECALL operation mode
+ // read in weights from file
+ // open file for write
+ if ((fout=fopen("SOFM_res.txt","r"))==NULL)
+ {
+ printf("Cant open file for reading SOFM training results");
+ exit(1);
+ }
+ for (idx_cn_h = 0; idx_cn_h < (sofm.arch.slabs[1].size.height) ; idx_cn_h++)
+ {
+ for (idx_cn_w = 0; idx_cn_w < (sofm.arch.slabs[1].size.width) ; idx_cn_w++)
+ { // loop through the neurons in the current layer
+ for (idx_pn = 0; idx_pn < (sofm.arch.slabs[0].size.width) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ fscanf(fout,"%f",&(sofm.arch.slabs[1].neurons[idx_cn_h][idx_cn_w].w[idx_pn]));
+ }
+ }
+ }
+ fclose(fout);
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void store_sofm_results (void)
+{
+ // store weights
+ int idx_cn_h,idx_cn_w, idx_pn;
+ FILE *fout;
+
+ // open file for write
+
+ if (sofm.env.operation_mode == NN_TRAINING)
+ {
+ if ((fout=fopen("SOFM_res.txt","w"))==NULL)
+ {
+ printf("Cant open file for writing SOFM weights");
+ exit(1);
+ }
+ for (idx_cn_h = 0; idx_cn_h < (sofm.arch.slabs[1].size.height) ; idx_cn_h++)
+ {
+ for (idx_cn_w = 0; idx_cn_w < (sofm.arch.slabs[1].size.width) ; idx_cn_w++)
+ { // loop through the neurons in the current layer
+ for (idx_pn = 0; idx_pn < (sofm.arch.slabs[0].size.width) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ fprintf(fout,"%f\n",(sofm.arch.slabs[1].neurons[idx_cn_h][idx_cn_w].w[idx_pn]));
+ }
+ fprintf(fout,"\n");
+ }
+ }
+
+ fclose(fout);
+ }
+ else // RECALL
+ {
+ if ((fout=fopen("SOFM_test.txt","w"))==NULL)
+ {
+ printf("Cant open file for writing SOFM test results");
+ exit(1);
+ }
+ fprintf(fout,"\n");
+ if (patset.dim_out >0)
+ {
+ for (idx_pn = 0; idx_pn < patset.dim_out; idx_pn++ )
+ {
+ fprintf(fout,"\tClass %d",idx_pn);
+ }
+ }
+ fprintf(fout,"\n-----------------------------------------------------\n");
+
+ for (idx_cn_h = 0; idx_cn_h < sofm.arch.slabs[1].size.height; idx_cn_h++ )
+ {
+ for (idx_cn_w = 0; idx_cn_w < sofm.arch.slabs[1].size.width; idx_cn_w++ )
+ {
+ fprintf(fout,"%2.2d %2.2d\t", idx_cn_h, idx_cn_w);
+ if (patset.dim_out == 0)
+ {
+ fprintf(fout,"%d\t",test_result[idx_cn_h * sofm.arch.slabs[1].size.width + idx_cn_w][0] );
+ }
+ else
+ {
+ for (idx_pn = 0; idx_pn < patset.dim_out; idx_pn++ )
+ {
+ fprintf(fout,"%d\t",test_result[idx_cn_h * sofm.arch.slabs[1].size.width + idx_cn_w][idx_pn] );
+ }
+ }
+ fprintf(fout,"\n");
+ }
+ }
+ fprintf(fout,"\n");
+ #if SAVE_CLUSTER
+  fprintf(fout,"\n--------------------------------------------------------\n");
+ fprintf(fout,"\tcluster number for each pattern\n");
+ if (patset.dim_out == 0)
+ {
+ fprintf(fout,"\nindex\tcluster height\tcluster width \n");
+ }
+ else
+ {
+ fprintf(fout,"\nindex\tcluster height\tcluster width\ttarget \n");
+ }
+ for (idx_cn_h = 0; idx_cn_h < patset.size; idx_cn_h++)
+ {
+ if (patset.dim_out == 0)
+ {
+ fprintf(fout,"%d\t%d\t%d\n",idx_cn_h, cluster_result[idx_cn_h][0], cluster_result[idx_cn_h][1] );
+ }
+ else
+ {
+ fprintf(fout,"%d\t%d\t%d\t\t%d\n",idx_cn_h, cluster_result[idx_cn_h][0], cluster_result[idx_cn_h][1],cluster_result[idx_cn_h][2] );
+ }
+ }
+ fprintf(fout,"\n");
+ #endif
+ fclose(fout);
+ }
+}
+
+/************************************************************/
+/* SOFM State Handlers */
+/************************************************************/
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_state_handler (int state_index)
+{
+ switch (state_index)
+ {
+ case SOFM_GET_PATTERN:
+ sofm_get_pattern();
+ break;
+ case SOFM_WEIGHT_NORMALIZATION:
+ sofm_weight_normalization();
+ break;
+ case SOFM_FEEDFORWARD_INPUT:
+ sofm_feedforward_input();
+ break;
+ case SOFM_FEEDFORWARD_OUTPUT:
+ sofm_feedforward_output();
+ break;
+ case SOFM_WINNING_NEURON:
+ sofm_winning_neuron();
+ break;
+ case SOFM_UPDATE_NEIGHBORHOOD:
+ sofm_update_neighborhood();
+ break;
+ //case SOFM_WEIGHT_STEP_CHANGE:
+ // sofm_weight_step_change();
+ // break;
+ case SOFM_WEIGHT_CHANGE:
+ sofm_weight_change();
+ break;
+ case SOFM_NEXT_PATTERN:
+ sofm_next_pattern();
+ break;
+ case SOFM_NEXT_ITERATION:
+ sofm_next_iteration();
+ break;
+ case SOFM_UPDATE_LEARNING_RATE:
+ sofm_update_learning_rate();
+ break;
+ case SOFM_UPDATE_CONSCIENCE_FACTOR:
+ sofm_update_conscience_factor();
+ break;
+ case SOFM_TRAINING_DONE:
+ sofm_training_done();
+ break;
+ case SOFM_RECALL_DONE:
+ sofm_recall_done();
+ break;
+ default:
+ break;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_get_pattern (void)
+{
+ int idx;
+
+ for (idx = 0; idx < (sofm.arch.slabs[0].size.width); idx++)
+ {
+ sofm.arch.slabs[0].neurons[0][idx].in = patset.patterns[sofm.env.cur_pat][idx];
+ }
+ if (patset.dim_out > 0)
+ {
+ for (idx = 0; idx < patset.dim_out; idx++ )
+ {
+ target_out[idx] = patset.patterns[sofm.env.cur_pat][patset.dim_in + idx];
+ //printf("%d: %f\n",sofm.env.cur_pat, target_out[idx]);
+ }
+ }
+ if (sofm.env.operation_mode == NN_TRAINING)
+ {
+ sofm_cur_state = SOFM_WEIGHT_NORMALIZATION;
+ }
+ else
+ {
+ sofm_cur_state = SOFM_FEEDFORWARD_INPUT;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_weight_normalization (void)
+{
+ int idx_cn_h,idx_cn_w, idx_pn;
+ double sum;
+ float temp_f;
+
+ for (idx_cn_h = 0; idx_cn_h < (sofm.arch.slabs[1].size.height) ; idx_cn_h++)
+ {
+ for (idx_cn_w = 0; idx_cn_w < (sofm.arch.slabs[1].size.width) ; idx_cn_w++)
+ { // loop through neurons in the output layer
+ sum = 0.0;
+ for (idx_pn = 0; idx_pn < (sofm.arch.slabs[0].size.width) ; idx_pn++ )
+ { // loop through all the weights connected to this neuron
+ sum += sofm.arch.slabs[1].neurons[idx_cn_h][idx_cn_w].w[idx_pn] * sofm.arch.slabs[1].neurons[idx_cn_h][idx_cn_w].w[idx_pn];
+ }
+ sum = sqrt(sum);
+ if (sum > 0.0)
+ {
+ for (idx_pn = 0; idx_pn < (sofm.arch.slabs[0].size.width) ; idx_pn++ )
+ { // loop through all the weights connected to this neuron
+ temp_f = sofm.arch.slabs[1].neurons[idx_cn_h][idx_cn_w].w[idx_pn]/sum;
+ sofm.arch.slabs[1].neurons[idx_cn_h][idx_cn_w].w[idx_pn] = temp_f;
+ }
+ }
+ }
+ }
+ sofm_cur_state = SOFM_FEEDFORWARD_INPUT;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_feedforward_input(void)
+{
+ int idx;
+
+ for (idx = 0; idx < (sofm.arch.slabs[0].size.width); idx++)
+ {
+ sofm.arch.slabs[0].neurons[0][idx].out = sofm.arch.slabs[0].neurons[0][idx].in;
+ }
+
+ sofm_cur_state = SOFM_FEEDFORWARD_OUTPUT;
+}
+
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_feedforward_output (void)
+{
+ int idx_out_h, idx_out_w, idx_prev;
+ double sum, temp_f;
+ for (idx_out_h = 0; idx_out_h < (sofm.arch.slabs[1].size.height ); idx_out_h++)
+ {
+ for (idx_out_w = 0; idx_out_w < (sofm.arch.slabs[1].size.width ); idx_out_w++)
+    { // loop through the neurons of the output layer
+ sum = 0.0;
+ for ( idx_prev = 0; idx_prev < (sofm.arch.slabs[0].size.width ); idx_prev++)
+ { // loop through the neurons of the input layer
+ temp_f = (sofm.arch.slabs[0].neurons[0][idx_prev].out - sofm.arch.slabs[1].neurons[idx_out_h][idx_out_w].w[idx_prev] );
+ sum += (temp_f * temp_f);
+ }
+ temp_f = sqrt(sum);
+ sofm.arch.slabs[1].neurons[idx_out_h][idx_out_w].in = temp_f;
+ sofm.arch.slabs[1].neurons[idx_out_h][idx_out_w].out = activate_function(temp_f,sofm.arch.slabs[1].neurons[idx_out_h][idx_out_w].neuron_function);
+ }
+ }
+ sofm_cur_state = SOFM_WINNING_NEURON;
+}
+
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_winning_neuron (void)
+{
+ int idx_h, idx_w;
+ SOFM_2D_Size_Type temp_w;
+ float min_v = 1000.0;
+
+ for (idx_h = 0; idx_h < (sofm.arch.slabs[1].size.height); idx_h++)
+ {
+ for (idx_w = 0; idx_w < (sofm.arch.slabs[1].size.width); idx_w++)
+ { // loop through the neurons in output layer
+ if ((sofm.arch.slabs[1].neurons[idx_h][idx_w].out - sofm.arch.slabs[1].neurons[idx_h][idx_w].b_v) < min_v)
+ {
+ min_v = sofm.arch.slabs[1].neurons[idx_h][idx_w].out - sofm.arch.slabs[1].neurons[idx_h][idx_w].b_v;
+ temp_w.height = idx_h;
+ temp_w.width = idx_w;
+ }
+ }
+ }
+ sofm.arch.slabs[1].neurons[sofm.env.winner.height][sofm.env.winner.width].w_s = 0; // clear the old winner status
+ sofm.env.winner.height = temp_w.height; // assign new winner
+ sofm.env.winner.width = temp_w.width;
+ sofm.arch.slabs[1].neurons[sofm.env.winner.height][sofm.env.winner.width].w_s = 1; // set new winner status
+
+ if (sofm.env.operation_mode == NN_TRAINING)
+ {
+ sofm_cur_state = SOFM_UPDATE_NEIGHBORHOOD;
+ }
+ else
+ { // recall
+ update_recall_result();
+ sofm_cur_state = SOFM_NEXT_PATTERN;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_next_pattern (void)
+{
+ if (sofm.env.operation_mode == NN_TRAINING)
+ {
+ if (sofm.env.train_mode == SOFM_RANDOM_MODE)
+ { // random training
+ sofm.env.cur_pat = rand()%(sofm.env.max_tra_pat); // next random pattern index
+ }
+ else
+ { // sequential training
+ if (++sofm.env.cur_pat >= sofm.env.max_tra_pat)
+ {
+ sofm.env.cur_pat = 0;
+ }
+ }
+ if ((++sofm.env.pat_counter) <sofm.env.max_tra_pat)
+ { // add other termination criterion here
+ sofm_cur_state = SOFM_UPDATE_LEARNING_RATE;
+ }
+ else
+ {
+ sofm_cur_state = SOFM_NEXT_ITERATION;
+ }
+ }
+ else // recall
+ {
+ if ((++sofm.env.cur_pat) < patset.size)
+ {
+ sofm_cur_state = SOFM_GET_PATTERN;
+ }
+ else
+ {
+ sofm_cur_state = SOFM_RECALL_DONE;
+ }
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_next_iteration (void)
+{
+ sofm.env.pat_counter = 0;
+ if ((++sofm.env.cur_ite) < sofm.env.max_ite)
+ { // add other termination criterion here
+ sofm_cur_state = SOFM_UPDATE_LEARNING_RATE;
+ }
+ else
+ {
+ sofm.env.pat_counter = 0;
+ sofm_cur_state = SOFM_TRAINING_DONE;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_update_neighborhood (void)
+{
+ static int temp_c;
+
+ temp_c = sofm.env.cur_ite/ite_per_update_neighbor.height;
+ sofm.env.cur_neighbor.height = sofm.env.neighbor.height - temp_c;
+//printf("temp_c = %d\theight = %d\n",temp_c, sofm.env.cur_neighbor.height);
+ temp_c = sofm.env.cur_ite/ite_per_update_neighbor.width;
+ sofm.env.cur_neighbor.width = sofm.env.neighbor.width - temp_c;
+//printf("temp_c = %d\theight = %d\n",temp_c, sofm.env.cur_neighbor.height);
+ if (sofm.env.cur_neighbor.height < 0)
+ {
+ sofm.env.cur_neighbor.height = 0;
+ }
+ if (sofm.env.cur_neighbor.width < 0)
+ {
+ sofm.env.cur_neighbor.width = 0;
+ }
+ sofm_cur_state = SOFM_WEIGHT_CHANGE;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_weight_change (void)
+{
+ int idx_pn, idx_h, idx_w;
+ int n_h,n_w;
+
+ for (idx_pn = 0; idx_pn < (sofm.arch.slabs[0].size.width) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ for (idx_h = -(sofm.env.cur_neighbor.height); idx_h <= (sofm.env.cur_neighbor.height ) ; idx_h++ )
+ {
+ n_h = sofm.env.winner.height + idx_h;
+ if (n_h < 0)
+ {
+ n_h += sofm.arch.slabs[1].size.height;
+ }
+ else if (n_h >= sofm.arch.slabs[1].size.height)
+ {
+ n_h -= sofm.arch.slabs[1].size.height;
+ }
+ for (idx_w = -(sofm.env.cur_neighbor.width); idx_w <= (sofm.env.cur_neighbor.width ) ; idx_w++ )
+ {
+ n_w = sofm.env.winner.width + idx_w;
+ if (n_w < 0)
+ {
+ n_w += sofm.arch.slabs[1].size.width;
+ }
+ else if (n_w >= sofm.arch.slabs[1].size.width)
+ {
+ n_w -= sofm.arch.slabs[1].size.width;
+ }
+ sofm.arch.slabs[1].neurons[n_h][n_w].delta_w[idx_pn] = sofm.arch.slabs[0].neurons[0][idx_pn].out - sofm.arch.slabs[1].neurons[n_h][n_w].w[idx_pn];
+ sofm.arch.slabs[1].neurons[n_h][n_w].delta_w[idx_pn] *= (sofm.env.eta * neighbor_func(idx_h,idx_w));
+
+ sofm.arch.slabs[1].neurons[n_h][n_w].w[idx_pn] += sofm.arch.slabs[1].neurons[n_h][n_w].delta_w[idx_pn];
+ }
+ }
+
+
+ }
+ sofm_cur_state = SOFM_NEXT_PATTERN;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float neighbor_func (int height, int width)
+{
+ int temp_i;
+ float result;
+
+ temp_i = (height > width)?height:width;
+ switch (sofm.env.neighbor_function)
+ {
+ case CHEF_HAT:
+ result = chef_hat(temp_i);
+ break;
+ case MEXICAN_HAT:
+ result = mexican_hat(temp_i);
+ break;
+ case STOVEPIPE_HAT:
+ result = stovepipe_hat(temp_i);
+ break;
+ default:
+ printf("need to specify neighborhood function\n");
+ exit(1);
+ break;
+ }
+ return(result);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float chef_hat (int distance)
+{
+ if (distance <= NEIGHBORHODD_DISTANCE)
+ return(1.0);
+ else
+ return(0.0);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float mexican_hat (int distance)
+{
+  printf("Mexican hat function is not implemented\n");
+ exit(1);
+ return(1.0);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float stovepipe_hat (int distance)
+{
+  printf("Stovepipe hat function is not implemented\n");
+ exit(1);
+ return(1.0);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_update_learning_rate (void)
+{
+ sofm.env.eta *= sofm.env.shrink;
+ sofm_cur_state = SOFM_UPDATE_CONSCIENCE_FACTOR;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_update_conscience_factor (void)
+{
+ int idx_h, idx_w;
+ float temp_f;
+ if (sofm.env.conscience == SOFM_CONSCIENCE)
+ {
+ for (idx_h = 0; idx_h < (sofm.arch.slabs[1].size.height); idx_h++)
+ {
+ for (idx_w = 0; idx_w < (sofm.arch.slabs[1].size.width); idx_w++)
+ { // loop through the neurons in output layer
+ temp_f = sofm.arch.slabs[1].neurons[idx_h][idx_w].c_f;
+ sofm.arch.slabs[1].neurons[idx_h][idx_w].c_f = temp_f + sofm.env.beta * (sofm.arch.slabs[1].neurons[idx_h][idx_w].w_s - temp_f);
+ sofm.arch.slabs[1].neurons[idx_h][idx_w].b_v = sofm.env.gama * (1.0/(sofm.arch.slabs[1].size.height * sofm.arch.slabs[1].size.width) - sofm.arch.slabs[1].neurons[idx_h][idx_w].c_f );
+ }
+ }
+ }
+ sofm_cur_state = SOFM_GET_PATTERN;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_training_done (void)
+{
+ sofm_weight_normalization();
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void sofm_recall_done (void)
+{
+
+}
+
+/************************************************************/
+/* neuron activation functions */
+/************************************************************/
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float activate_function(float sum, int index)
+{
+ float res;
+ switch (index)
+ {
+ case NN_LINEAR_FUNCTION:
+ res = nn_linear_function(sum);
+ break;
+ case NN_GAUSIAN_FUNCTION:
+ res = nn_gausian_function(sum);
+ break;
+ case NN_SIGMOID_FUNCTION:
+ res = nn_sigmoid_function(sum);
+ break;
+ default:
+ break;
+ }
+ return (res);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float nn_linear_function (float sum)
+{
+ float res;
+
+ res = sum;
+ return (res);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float nn_gausian_function (float sum)
+{
+ float res;
+ res = sum; // No Guassian in SOFM
+ return (res);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float nn_sigmoid_function (float sum)
+{
+ float res;
+ res = sum; // No sigmoid in SOFM
+ return (res);
+}
+
diff --git a/sofm/sofm.h b/sofm/sofm.h
new file mode 100644
index 0000000..a562867
--- /dev/null
+++ b/sofm/sofm.h
@@ -0,0 +1,143 @@
+#ifndef SOFM_NN_H
+#define SOFM_NN_H
+
+#include "nnet.h"
+
+/**************************************************************/
+/* Constants and Macros */
+/**************************************************************/
+
+// Listing 6.12 Data type definiton for SOFM
+/**************************************************************/
+/* Enumerations */
+/**************************************************************/
+typedef enum SOFM_Training_Mode_Tag
+{
+ SOFM_RANDOM_MODE,
+ SOFM_SEQUENTIAL_MODE,
+ NUM_SOFM_TRAINING_MODES
+} SOFM_Training_Mode_Type;
+
+
+typedef enum SOFM_State_Tag
+{
+ SOFM_GET_PATTERN,
+ SOFM_WEIGHT_NORMALIZATION,
+ SOFM_FEEDFORWARD_INPUT,
+ SOFM_FEEDFORWARD_OUTPUT,
+ SOFM_WINNING_NEURON,
+ SOFM_UPDATE_NEIGHBORHOOD,
+ //SOFM_WEIGHT_STEP_CHANGE,
+ SOFM_WEIGHT_CHANGE,
+ SOFM_NEXT_PATTERN,
+ SOFM_NEXT_ITERATION,
+ SOFM_UPDATE_LEARNING_RATE,
+ SOFM_UPDATE_CONSCIENCE_FACTOR,
+ SOFM_TRAINING_DONE,
+ SOFM_RECALL_DONE,
+ NUM_SOFM_STATES
+} SOFM_State_Type;
+
+typedef enum SOFM_Conscience_Type_Tag
+{
+ SOFM_NO_CONSCIENCE,
+ SOFM_CONSCIENCE,
+ NUM_SOFM_CONSCIENCE
+} SOFM_Conscience_Type;
+
+typedef enum Neighbor_Function_Type_Tag
+{
+ CHEF_HAT,
+ MEXICAN_HAT,
+ STOVEPIPE_HAT,
+ NUM_NEIGHBOR_FUNC
+} Neighbor_Function_Type;
+
+/**************************************************************/
+/* Structures */
+/**************************************************************/
+typedef struct SOFM_2D_Size_Type_Tag
+{ // rectangular
+ int width; // horizontal length of the neighborhood
+ int height; // vertical length of the neighborhood
+} SOFM_2D_Size_Type;
+
+typedef struct Neuron_Type_Tag
+{
+ NN_Function_Type neuron_function; // neuron function
+ float in; // neuron input
+ float out; // neuron output, euclidean distance for output neuron
+ FVECTOR w; // connection weights from the previous layers
+ FVECTOR delta_w; // step change of weights
+ float c_f; // conscience factor
+ float b_v; // bias value
+ int w_s; // winner status, y in equation
+} Neuron_Type;
+
+typedef struct NN_Layer_Arch_Type_Tag
+{
+ SOFM_2D_Size_Type size; // size of the slab
+ Neuron_Type **neurons; // pointer to the array of the neurons
+ NN_Layer_Type slab_type;
+} NN_Slab_Arch_Type;
+
+typedef struct SOFM_Arch_Type_Tag
+{
+ int size; // number of layers
+ NN_Slab_Arch_Type *slabs; // pointer to the layers
+} SOFM_Arch_Type;
+
+typedef struct SOFM_Env_Type_Tag
+{
+ NN_Operation_Mode_Type operation_mode; // training or recall
+ SOFM_Training_Mode_Type train_mode; // training mode if in training operation mode
+ float eta; // learning rate
+ float gama; // bias factor
+ float beta; //
+ float shrink; // learning rate (eta) shrinking coefficient
+ float criterion; // training criterion for termination
+ int max_ite; // maximum number of iterations
+ int cur_ite; // current iteration index
+ int max_tra_pat; // total number of training patterns
+ int cur_pat; // current training pattern index
+ int pat_counter; // pattern index increase counter within a iteration
+ SOFM_Conscience_Type conscience; // 0: no conscience, 1: conscience
+ SOFM_2D_Size_Type winner; // index of winning neuron
+ SOFM_2D_Size_Type neighbor; // neighborhood size
+ SOFM_2D_Size_Type cur_neighbor; // current neighborhood size
+ Neighbor_Function_Type neighbor_function; // neighborhood function type
+} SOFM_Env_Type;
+
+typedef struct SOFM_Type_Tag
+{
+ SOFM_Arch_Type arch;
+ SOFM_Env_Type env;
+} SOFM_Type;
+
+typedef struct SOFM_Pattern_Set_Type_Tag
+{
+ int size; // number of patterns
+ int dim_in; // input dimension
+ int dim_out; // output dimension
+ FMATRIX patterns; // pointer to the array of in/out patterns
+} SOFM_Pattern_Set_Type;
+
+/**************************************************************/
+/* Function Prototypes for functions with file level scope */
+/**************************************************************/
+
+
+/**************************************************************/
+/* Const Variable with global level scope */
+/**************************************************************/
+
+
+/**************************************************************/
+/* Function Prototypes for functions with global level scope */
+/**************************************************************/
+
+extern void SOFM_Main_Loop(void);
+extern void SOFM_Start_Up(char *dataFile);
+extern void SOFM_Clean_Up(void);
+
+#endif
|
btbytes/ci
|
452b4ac2a0daf3218d2ad7d948b1767636115da0
|
adding back propagation files
|
diff --git a/bp/Makefile b/bp/Makefile
new file mode 100644
index 0000000..06ceaf5
--- /dev/null
+++ b/bp/Makefile
@@ -0,0 +1,2 @@
+all: bp.h definiti.h headfile.h mem_loc.h nnet.h sigmoid.h
+ gcc -Wall -lm bp.c main.c mem_loc.c sigmoid.c -o bp
diff --git a/bp/bp.c b/bp/bp.c
new file mode 100644
index 0000000..5b23730
--- /dev/null
+++ b/bp/bp.c
@@ -0,0 +1,1131 @@
+#include "headfile.h"
+#include "definiti.h"
+#include "bp.h"
+#include "mem_loc.h"
+#include "sigmoid.h"
+
+
+#define MAX_NUM_CHARS 100
+
+/**************************************************************/
+/* Static Variable and Const Variable with File level scope */
+/**************************************************************/
+ static BP_Type bp;
+ static BP_Pattern_Set_Type patset;
+ static BP_State_Type bp_cur_state;
+
+ static char pat_file_name[MAX_NUM_CHARS];
+ static float *target_out;
+
+/**************************************************************/
+/* Function Prototypes for functions with file level scope */
+/**************************************************************/
+static void read_bp_parameters(char *dataFile); // read BP parameters from I/O file
+static void allocate_bp_memory(void); // allocate BP memory spaces
+static void free_bp_memory(void); // free BP memory spaces
+static void store_bp_results(void); // write BP results to I/O file
+
+static void bp_initialize(void);
+static void bp_initialize_weights(void);
+static void bp_state_handler(int); // BP state handle routine
+
+static void bp_get_pattern(void);
+static void bp_feedforward_input(void);
+static void bp_feedforward_hidden(void);
+static void bp_feedforward_output(void);
+static void bp_back_propagation_output(void);
+static void bp_back_propagation_hiddens(void);
+static void bp_batch_temp_weight_step_change(void);
+static void bp_next_pattern(void);
+static void bp_weight_step_change(void);
+static void bp_weight_change(void);
+static void bp_next_generation(void);
+static void bp_update_learning_rate(void);
+static void bp_update_momentum_rate(void);
+static void bp_training_done(void);
+static void bp_recall_done(void);
+
+static float activate_function(float, int);
+static float nn_linear_function(float);
+static float nn_gausian_function(float);
+static float nn_sigmoid_function(float);
+
+static void print_net_parameters(void);
+static void print_mse(void);
+static void print_recall_result(void);
+
+/**************************************************************/
+/* Function Definitions */
+/**************************************************************/
+
+
+/**************************************************************/
+/* BP Start and clean routines and interfaces */
+/**************************************************************/
+
+/**************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+***************************************************************/
+
+void BP_Start_Up (char *dataFile)
+{
+ read_bp_parameters(dataFile);
+ allocate_bp_memory(); // allocate memory for BP
+ bp_initialize();
+}
+
+/*************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+void BP_Clean_Up (void)
+{
+ store_bp_results();
+ free_bp_memory(); // free memory space of BP
+}
+
+
+/************************************************************/
+/* BP functons */
+/************************************************************/
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+void BP_Main_Loop (void)
+{
+ BOOLEAN running;
+
+ running = TRUE;
+ while (running)
+ {
+ if ((bp_cur_state == BP_TRAINING_DONE) || (bp_cur_state == BP_RECALL_DONE) )
+ {
+ running = FALSE;
+ }
+ bp_state_handler(bp_cur_state);
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void read_bp_parameters (char *dataFile)
+{
+ int idx_i;
+ FILE *frun;
+
+ // open the runfile to input parameters
+ if ((frun=fopen(dataFile,"r"))==NULL)
+ {
+ printf("Cant read run file");
+ exit(1);
+ }
+
+ // read BP's parameters from run file
+ // read BP's environment data
+ fscanf(frun, "%d",&(bp.env.operation_mode)); // training or recall
+ fscanf(frun, "%d",&(bp.env.train_mode)); // training mode if in training operation mode
+ fscanf(frun, "%f",&(bp.env.alpha)); // learning rate
+ fscanf(frun, "%f",&(bp.env.gama)); // momentum rate
+ fscanf(frun, "%f",&(bp.env.criterion)); // training error criterion for termination
+ fscanf(frun, "%d",&(bp.env.max_gen)); // maximum number of generations
+ fscanf(frun, "%d",&(bp.env.max_tra_pat)); // total number of training patterns
+
+ // read BP's Arch
+ fscanf(frun, "%d",&(bp.arch.size)); // number of layers
+ // allocate memory for numbers of neurons in hidden layers
+ bp.arch.hidden_number = calloc((bp.arch.size - 2),sizeof(int));
+ if (!(bp.arch.hidden_number))
+ {
+ printf("Allocation error in read_bp_parameters() - aborting.");
+ exit(1);
+ }
+ // read in numbers of neurons in hidden layers
+ for (idx_i = 0 ; (idx_i < (bp.arch.size - 2)) ; idx_i++)
+ {
+ fscanf(frun, "%d",&(bp.arch.hidden_number[idx_i]));
+ }
+
+ // read in/out pattern parameters
+ fscanf(frun, "%d",&(patset.size)); // number of pattern
+ // read pattern input dimension
+ fscanf(frun, "%d",&(patset.dim_in)); // input dimension
+ // read pattern output dimension
+ fscanf(frun, "%d",&(patset.dim_out)); // output dimension
+ // read pattern file name
+ fscanf(frun, "%s", pat_file_name);
+
+ fclose(frun);
+ //print_net_parameters();
+}
+
+static void print_net_parameters (void)
+{
+ int idx_i;
+ // test
+ printf( "%d\n",(bp.env.operation_mode)); // training or recall
+ printf( "%d\n",(bp.env.train_mode)); // training mode if in training operation mode
+ printf( "%f\n",(bp.env.alpha)); // learning rate
+ printf( "%f\n",(bp.env.gama)); // momentum rate
+ printf( "%f\n",(bp.env.criterion)); // training error criterion for termination
+ printf( "%d\n",(bp.env.max_gen)); // maximum number of generations
+ printf( "%d\n\n",(bp.env.max_tra_pat)); // total number of training patterns
+
+ printf( "%d\n",(bp.arch.size)); // number of layers
+ for (idx_i = 0 ; (idx_i < (bp.arch.size - 2)) ; idx_i++)
+ {
+ printf( "%d\n",(bp.arch.hidden_number[idx_i]));
+ }
+ printf( "%d\n",(patset.size)); // number of pattern
+ // read pattern input dimension
+ printf( "%d\n",(patset.dim_in)); // input dimension
+ // read pattern output dimension
+ printf( "%d\n",(patset.dim_out)); // output dimension
+ // read pattern file name
+ printf( "%s\n", pat_file_name);
+}
+
+static void print_mse (void)
+{
+ printf("%f\n",bp.mse); // output mean squared error
+}
+
+static void print_recall_result(void)
+{
+ int idx_out;
+
+ printf("\nOutput\t");
+ for (idx_out = 0; idx_out < (bp.arch.layers[bp.arch.size - 1].size ); idx_out++)
+ { // loop throught the neurons of the output layer
+ printf("%f\t",bp.arch.layers[bp.arch.size - 1].neurons[idx_out].out);
+ }
+ printf("\nTarget\t");
+ for (idx_out = 0; idx_out < (bp.arch.layers[bp.arch.size - 1].size ); idx_out++)
+ { // loop throught the neurons of the output layer
+ printf("%f\t",target_out[idx_out]);
+ }
+ printf("\n");
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void allocate_bp_memory (void)
+{
+ int idx, idx_i;
+
+ // allocate memory for BP net
+ bp.arch.layers = calloc(bp.arch.size,sizeof(NN_Layer_Arch_Type));
+
+ //allocate memory for input layer
+ bp.arch.layers[0].size = patset.dim_in;
+ bp.arch.layers[0].neurons = calloc(patset.dim_in,sizeof(Neuron_Type));
+ bp.arch.layers[0].layer_type = NN_INPUT_LAYER;
+ // specify and allocate memory for neurons of input layer
+ for (idx =0 ; idx < bp.arch.layers[0].size ; idx++)
+ {
+ bp.arch.layers[0].neurons[idx].neuron_function = NN_LINEAR_FUNCTION;
+ FVectorAllocate(&(bp.arch.layers[0].neurons[idx].delta_w),1);
+ FVectorAllocate(&(bp.arch.layers[0].neurons[idx].temp_delta_w),1);
+ FVectorAllocate(&(bp.arch.layers[0].neurons[idx].w),1);
+ }
+
+ // allocate memory for hidden layers
+ for (idx =0 ; idx < (bp.arch.size - 2);idx++ )
+ {
+ bp.arch.layers[idx + 1].size = bp.arch.hidden_number[idx];
+ bp.arch.layers[idx + 1].neurons = calloc(bp.arch.layers[idx + 1].size,sizeof(Neuron_Type));
+ bp.arch.layers[idx + 1].layer_type = NN_HIDDEN_LAYER;
+ // specify and allocate memory for neurons of hiddem layer
+ for (idx_i =0 ; idx_i < bp.arch.layers[idx + 1].size ; idx_i++)
+ {
+ bp.arch.layers[idx + 1].neurons[idx_i].neuron_function = NN_SIGMOID_FUNCTION;
+ FVectorAllocate(&(bp.arch.layers[idx + 1].neurons[idx_i].delta_w),bp.arch.layers[idx].size + 1); // add one bias
+ FVectorAllocate(&(bp.arch.layers[idx + 1].neurons[idx_i].temp_delta_w),bp.arch.layers[idx].size + 1);
+ FVectorAllocate(&(bp.arch.layers[idx + 1].neurons[idx_i].w),bp.arch.layers[idx].size + 1);
+ }
+ }
+
+ // allocate memory for output layer
+ bp.arch.layers[bp.arch.size - 1].size = patset.dim_out;
+ bp.arch.layers[bp.arch.size - 1].neurons = calloc(patset.dim_out,sizeof(Neuron_Type));
+ bp.arch.layers[bp.arch.size - 1].layer_type = NN_OUTPUT_LAYER;
+ // specify and allocate memory for neurons of output layer
+ for (idx =0 ; idx < bp.arch.layers[bp.arch.size - 1].size ; idx++)
+ {
+ bp.arch.layers[bp.arch.size - 1].neurons[idx].neuron_function = NN_SIGMOID_FUNCTION;
+ FVectorAllocate(&(bp.arch.layers[bp.arch.size - 1].neurons[idx].delta_w),bp.arch.layers[bp.arch.size - 2].size +1);
+ FVectorAllocate(&(bp.arch.layers[bp.arch.size - 1].neurons[idx].temp_delta_w),bp.arch.layers[bp.arch.size - 2].size +1);
+ FVectorAllocate(&(bp.arch.layers[bp.arch.size - 1].neurons[idx].w),bp.arch.layers[bp.arch.size - 2].size + 1);
+ }
+
+ // allocate memory for pattern set
+ FMatrixAllocate(&(patset.patterns),patset.size,(patset.dim_in + patset.dim_out));
+
+ // allocate memory for target output
+ target_out = calloc(patset.dim_out,sizeof(float));
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void free_bp_memory (void)
+{
+ int idx, idx_i;
+ // free memory for pattern set
+ FMatrixFree(patset.patterns,patset.size);
+
+ // free memory for BP net
+ // free memory for output layer
+ for (idx =0 ; idx < bp.arch.layers[bp.arch.size - 1].size ; idx++)
+ {
+ free(bp.arch.layers[bp.arch.size - 1].neurons[idx].w);
+ free(bp.arch.layers[bp.arch.size - 1].neurons[idx].delta_w);
+ free(bp.arch.layers[bp.arch.size - 1].neurons[idx].temp_delta_w);
+ }
+ free(bp.arch.layers[bp.arch.size - 1].neurons);
+
+ // free memory for hidden layers
+ for (idx =0 ; idx < (bp.arch.size - 2);idx++ )
+ {
+ for (idx_i =0 ; idx_i < bp.arch.layers[idx + 1].size ; idx_i++)
+ {
+ free(bp.arch.layers[idx + 1].neurons[idx_i].w);
+ free(bp.arch.layers[idx + 1].neurons[idx_i].delta_w);
+ free(bp.arch.layers[idx + 1].neurons[idx_i].temp_delta_w);
+ }
+ free(bp.arch.layers[idx + 1].neurons);
+ }
+ // free memory for input layer
+ for (idx =0 ; idx < bp.arch.layers[0].size ; idx++)
+ {
+ free(bp.arch.layers[0].neurons[idx].w);
+ free(bp.arch.layers[0].neurons[idx].delta_w);
+ free(bp.arch.layers[0].neurons[idx].temp_delta_w);
+ }
+ free(bp.arch.layers[0].neurons);
+
+ free(bp.arch.layers);
+ free(bp.arch.hidden_number);
+
+ free(target_out);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_initialize (void)
+{
+ int idx_i,idx_j;
+ int stime;
+ long ltime;
+ FILE *frun;
+
+ // get the current calendar time
+ ltime = time(NULL);
+ stime = (unsigned) ltime/2;
+ srand(stime);
+
+ // bp read patterns from file
+ // open the pattern file to obtain in/out patterns
+ if ((frun=fopen(pat_file_name,"r"))==NULL)
+ {
+ printf("Cant read pattern file");
+ exit(1);
+ }
+
+ for (idx_i = 0; idx_i < patset.size; idx_i++)
+ {
+ for (idx_j = 0; idx_j < (patset.dim_in + patset.dim_out) ; idx_j++)
+ {
+ fscanf(frun, "%f",&(patset.patterns[idx_i][idx_j]));
+ //printf("%f\t",patset.patterns[idx_i][idx_j]);
+ }
+ //printf("\n");
+ }
+ fclose(frun);
+
+ bp.mse = 0.0; // clean mean squared error
+ // bp randomly initialize weights
+ bp_initialize_weights();
+
+ // bp initial setting
+ bp_cur_state = BP_GET_PATTERN;
+ bp.env.cur_gen = 0;
+ bp.env.cur_pat = 0;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_initialize_weights (void)
+{
+ int idx_layer,idx_neuron,idx_weight;
+ float prob;
+ int idx_cn, idx_pn;
+ FILE *fout;
+
+ if (bp.env.operation_mode == NN_TRAINING)
+ {
+ // initialize weight for the neurons in the hidden layers
+ for (idx_layer = 1; idx_layer < ((bp.arch.size - 1)) ; idx_layer++)
+ {
+ for (idx_neuron = 0; idx_neuron < (bp.arch.layers[idx_layer].size); idx_neuron++)
+ {
+ for (idx_weight = 0; idx_weight < (bp.arch.layers[idx_layer - 1].size + 1); idx_weight++ )
+ {
+ prob = (rand()%1000)/1000.0 - 0.5;
+ bp.arch.layers[idx_layer].neurons[idx_neuron].w[idx_weight] = prob; // (-1,1)
+ bp.arch.layers[idx_layer].neurons[idx_neuron].delta_w[idx_weight] = 0.0;
+ bp.arch.layers[idx_layer].neurons[idx_neuron].temp_delta_w[idx_weight] = 0.0;
+ }
+ }
+ }
+
+ // initialize weight for the neurons in the output layer
+ for (idx_neuron = 0; idx_neuron < (bp.arch.layers[bp.arch.size - 1].size); idx_neuron++)
+ {
+ for (idx_weight = 0; idx_weight < (bp.arch.layers[bp.arch.size - 2].size + 1); idx_weight++ )
+ {
+ prob = (rand()%1000)/1000.0 - 0.5;
+ bp.arch.layers[bp.arch.size - 1].neurons[idx_neuron].w[idx_weight] = prob; // (-1,1)
+ bp.arch.layers[bp.arch.size - 1].neurons[idx_neuron].delta_w[idx_weight] = 0.0;
+ bp.arch.layers[bp.arch.size - 1].neurons[idx_neuron].temp_delta_w[idx_weight] = 0.0;
+ }
+ }
+ //store_bp_results();
+ }
+ else
+ { // RECALL operation mode
+ // read in weights from file
+ // open file for write
+ if ((fout=fopen("BP_res.txt","r"))==NULL)
+ {
+ printf("Cant open file for write BP training results");
+ exit(1);
+ }
+ for (idx_layer = 1; idx_layer < (bp.arch.size) ; idx_layer++ )
+ { // loop through the layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop through the neurons in the current layer
+ for (idx_pn = 0; idx_pn <= (bp.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ fscanf(fout,"%f",&(bp.arch.layers[idx_layer].neurons[idx_cn].w[idx_pn]));
+ }
+ }
+ }
+ fclose(fout);
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void store_bp_results (void)
+{
+ // store weights
+ int idx_layer, idx_cn, idx_pn;
+ FILE *fout;
+
+ // open file for write
+
+ if (bp.env.operation_mode == NN_TRAINING)
+ {
+ if ((fout=fopen("BP_res.txt","w"))==NULL)
+ {
+ printf("Cant open file for reading BP weights");
+ exit(1);
+ }
+ for (idx_layer = 1; idx_layer < (bp.arch.size) ; idx_layer++ )
+ { // loop through the layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop through the neurons in the current layer
+ for (idx_pn = 0; idx_pn <= (bp.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ fprintf(fout,"%f\n",bp.arch.layers[idx_layer].neurons[idx_cn].w[idx_pn]);
+ }
+ }
+ }
+ fprintf(fout,"\n\nmse = %f\n",bp.mse);
+ fclose(fout);
+ }
+ else // RECALL
+ {
+
+ }
+}
+
+/************************************************************/
+/* BP State Handlers */
+/************************************************************/
+
+// Listing 6.5 Main part of the BP state machine
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_state_handler (int state_index)
+{
+ switch (state_index)
+ {
+ case BP_GET_PATTERN:
+ bp_get_pattern();
+ break;
+ case BP_FEEDFORWARD_INPUT:
+ bp_feedforward_input();
+ break;
+ case BP_FEEDFORWARD_HIDDEN:
+ bp_feedforward_hidden();
+ break;
+ case BP_FEEDFORWARD_OUTPUT:
+ bp_feedforward_output();
+ break;
+ case BP_BACK_PROPAGATION_OUTPUT:
+ bp_back_propagation_output();
+ break;
+ case BP_BACK_PROPAGATION_HIDDENS:
+ bp_back_propagation_hiddens();
+ break;
+ case BP_BATCH_TEMP_WEIGHT_STEP_CHANGE:
+ bp_batch_temp_weight_step_change();
+ break;
+ case BP_NEXT_PATTERN:
+ bp_next_pattern();
+ break;
+ case BP_WEIGHT_STEP_CHANGE:
+ bp_weight_step_change();
+ break;
+ case BP_WEIGHT_CHANGE:
+ bp_weight_change();
+ break;
+ case BP_NEXT_GENERATION:
+ bp_next_generation();
+ break;
+ case BP_UPDATE_LEARNING_RATE:
+ bp_update_learning_rate();
+ break;
+ case BP_UPDATE_MOMENTUM_RATE:
+ bp_update_momentum_rate();
+ break;
+ case BP_TRAINING_DONE:
+ bp_training_done();
+ break;
+ case BP_RECALL_DONE:
+ bp_recall_done();
+ break;
+ default:
+ break;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_get_pattern (void)
+{
+ int idx;
+
+ for (idx = 0; idx < (bp.arch.layers[0].size); idx++)
+ {
+ bp.arch.layers[0].neurons[idx].in = patset.patterns[bp.env.cur_pat][idx];
+ }
+ for (idx = 0; idx < patset.dim_out; idx++ )
+ {
+ target_out[idx] = patset.patterns[bp.env.cur_pat][patset.dim_in + idx];
+ //printf("%d: %f\n",bp.env.cur_pat, target_out[idx]);
+ }
+
+ bp_cur_state = BP_FEEDFORWARD_INPUT;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_feedforward_input(void)
+{
+ int idx;
+
+ for (idx = 0; idx < (bp.arch.layers[0].size); idx++)
+ {
+ bp.arch.layers[0].neurons[idx].out = bp.arch.layers[0].neurons[idx].in;
+ }
+
+ bp_cur_state = BP_FEEDFORWARD_HIDDEN;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_feedforward_hidden (void)
+{
+ int idx, idx_prev,idx_cur;
+ float sum;
+
+ for (idx = 1; idx < ( bp.arch.size - 1); idx++)
+ { // loop through the hidden layers
+ for (idx_cur = 0; idx_cur < (bp.arch.layers[idx].size ); idx_cur++)
+ { // loop throught the neurons of the current hidden layer
+ sum = 0.0;
+ for ( idx_prev = 0; idx_prev < (bp.arch.layers[idx - 1].size ); idx_prev++)
+ { // loop through the outputs of the previous layer
+ sum += (bp.arch.layers[idx -1].neurons[idx_prev].out ) * (bp.arch.layers[idx].neurons[idx_cur].w[idx_prev] );
+ }
+ sum += (bp.arch.layers[idx].neurons[idx_cur].w[bp.arch.layers[idx - 1].size] );
+ bp.arch.layers[idx].neurons[idx_cur].in = sum;
+ bp.arch.layers[idx].neurons[idx_cur].out = activate_function(sum,bp.arch.layers[idx].neurons[idx_cur].neuron_function);
+ }
+ }
+ bp_cur_state = BP_FEEDFORWARD_OUTPUT;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_feedforward_output (void)
+{
+ int idx_out, idx_prev;
+ float sum;
+ for (idx_out = 0; idx_out < (bp.arch.layers[bp.arch.size - 1].size ); idx_out++)
+ { // loop throught the neurons of the output layer
+ sum = 0.0;
+ for ( idx_prev = 0; idx_prev < (bp.arch.layers[bp.arch.size - 2].size ); idx_prev++)
+ { // loop through the outputs of the previous layer
+ sum += (bp.arch.layers[bp.arch.size - 2].neurons[idx_prev].out ) * (bp.arch.layers[bp.arch.size - 1].neurons[idx_out].w[idx_prev] );
+ }
+ sum += (bp.arch.layers[bp.arch.size - 1].neurons[idx_out].w[bp.arch.layers[bp.arch.size - 2].size] );
+ bp.arch.layers[bp.arch.size - 1].neurons[idx_out].in = sum;
+ bp.arch.layers[bp.arch.size - 1].neurons[idx_out].out = activate_function(sum,bp.arch.layers[bp.arch.size - 1].neurons[idx_out].neuron_function);
+ //if (bp.env.operation_mode == NN_RECALL)
+ //{
+ // printf("patten index: %d\t%f\t%f\t%f\n",bp.env.cur_pat,bp.arch.layers[0].neurons[0].in,bp.arch.layers[0].neurons[1].in,bp.arch.layers[bp.arch.size - 1].neurons[idx_out].out);
+ //}
+ }
+ if (bp.env.operation_mode == NN_RECALL)
+ {
+ print_recall_result();
+ }
+ if (bp.env.operation_mode == NN_TRAINING)
+ {
+ bp_cur_state = BP_BACK_PROPAGATION_OUTPUT;
+ }
+ else
+ { // recall
+
+ bp_cur_state = BP_NEXT_PATTERN;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_back_propagation_output (void)
+{
+ int idx;
+ double tempA,tempB;
+
+ for (idx = 0; idx < (bp.arch.layers[bp.arch.size - 1].size); idx++)
+ {
+ tempA = (target_out[idx] - bp.arch.layers[bp.arch.size - 1].neurons[idx].out);
+ switch (bp.arch.layers[bp.arch.size - 1].neurons[idx].neuron_function)
+ {
+ case NN_LINEAR_FUNCTION:
+ bp.arch.layers[bp.arch.size - 1].neurons[idx].error = tempA;
+ break;
+ case NN_GAUSIAN_FUNCTION:
+ printf("BP net can't have Gaussian Neurons, exit\n");
+ exit(1);
+ break;
+ default: // NN_SIGMOID_FUNCTION
+ tempB = (bp.arch.layers[bp.arch.size - 1].neurons[idx].out) * ( 1.0 - (bp.arch.layers[bp.arch.size - 1].neurons[idx].out));
+ bp.arch.layers[bp.arch.size - 1].neurons[idx].error = tempA * tempB;
+ break;
+ }
+ bp.mse += (tempA * tempA);
+ }
+
+ bp_cur_state = BP_BACK_PROPAGATION_HIDDENS;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_back_propagation_hiddens (void)
+{
+ int idx_l, idx_cn, idx_nn;
+ double tempA,sum;
+
+ for (idx_l = bp.arch.size - 2; idx_l > 0; idx_l--)
+ { // loop through all the hidden layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_l].size) ; idx_cn++ )
+ { // loop through all the neurons in the current hidden layer
+ sum = 0.0;
+ for (idx_nn = 0; idx_nn < (bp.arch.layers[idx_l + 1].size); idx_nn++ )
+ { // loop through the next layer's neurons
+ sum += (bp.arch.layers[idx_l + 1].neurons[idx_nn].error) * (bp.arch.layers[idx_l + 1].neurons[idx_nn].w[idx_cn]);
+ }
+ tempA = bp.arch.layers[idx_l].neurons[idx_cn].out * ( 1.0 - (bp.arch.layers[idx_l].neurons[idx_cn].out));
+ bp.arch.layers[idx_l].neurons[idx_cn].error = sum * tempA;
+ }
+
+ }
+
+ bp_cur_state = BP_BATCH_TEMP_WEIGHT_STEP_CHANGE;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_batch_temp_weight_step_change (void)
+{
+ int idx_layer,idx_cn,idx_pn;
+ double tempA;
+
+ for (idx_layer = bp.arch.size - 1; idx_layer > 0; idx_layer--)
+ { // loop through layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop neurons in the current layer
+ for (idx_pn = 0; idx_pn < (bp.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ { // loop through neurons in the previous layer
+ tempA = bp.arch.layers[idx_layer].neurons[idx_cn].error * bp.arch.layers[idx_layer - 1].neurons[idx_pn].out;
+ tempA *= bp.env.alpha;
+ bp.arch.layers[idx_layer].neurons[idx_cn].temp_delta_w[idx_pn] += tempA;
+ }
+ bp.arch.layers[idx_layer].neurons[idx_cn].temp_delta_w[bp.arch.layers[idx_layer - 1].size] += bp.env.alpha * bp.arch.layers[idx_layer].neurons[idx_cn].error;
+ }
+ }
+
+ if (bp.env.train_mode == NN_BATCH_MODE)
+ {
+ bp_cur_state = BP_NEXT_PATTERN;
+ }
+ else
+ {
+ bp_cur_state = BP_WEIGHT_STEP_CHANGE;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_next_pattern (void)
+{
+ bp.env.cur_pat++;
+ if (bp.env.operation_mode == NN_TRAINING)
+ {
+ if (bp.env.train_mode == NN_BATCH_MODE)
+ {
+ if (bp.env.cur_pat < bp.env.max_tra_pat)
+ {
+ bp_cur_state = BP_GET_PATTERN;
+ }
+ else
+ {
+ bp_cur_state = BP_WEIGHT_STEP_CHANGE;
+ }
+ }
+ else // sequential learning
+ {
+ if (bp.env.cur_pat < bp.env.max_tra_pat)
+ {
+ bp_cur_state = BP_GET_PATTERN;
+ }
+ else
+ {
+ bp_cur_state = BP_NEXT_GENERATION;
+ }
+ }
+ }
+ else // recall
+ {
+ if (bp.env.cur_pat < patset.size)
+ {
+ bp_cur_state = BP_GET_PATTERN;
+ }
+ else
+ {
+ bp_cur_state = BP_RECALL_DONE;
+ }
+ }
+
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_weight_step_change (void)
+{
+ int idx_layer, idx_cn, idx_pn;
+
+ for (idx_layer = 1; idx_layer < (bp.arch.size) ; idx_layer++ )
+ { // loop through the layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop through the neurons in the current layer
+ for (idx_pn = 0; idx_pn <= (bp.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ bp.arch.layers[idx_layer].neurons[idx_cn].delta_w[idx_pn] *= bp.env.gama;
+ bp.arch.layers[idx_layer].neurons[idx_cn].delta_w[idx_pn] += (bp.arch.layers[idx_layer].neurons[idx_cn].temp_delta_w[idx_pn]); // /(bp.env.max_tra_pat);
+ bp.arch.layers[idx_layer].neurons[idx_cn].temp_delta_w[idx_pn] = 0.0;
+ }
+ }
+ }
+ bp_cur_state = BP_WEIGHT_CHANGE;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_weight_change (void)
+{
+ int idx_layer, idx_cn, idx_pn;
+
+ for (idx_layer = 1; idx_layer < (bp.arch.size) ; idx_layer++ )
+ { // loop through the layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop through the neurons in the current layer
+ for (idx_pn = 0; idx_pn <= (bp.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ bp.arch.layers[idx_layer].neurons[idx_cn].w[idx_pn] += bp.arch.layers[idx_layer].neurons[idx_cn].delta_w[idx_pn];
+ }
+ }
+ }
+
+ if (bp.env.train_mode == NN_BATCH_MODE)
+ {
+ bp_cur_state = BP_NEXT_GENERATION;
+ }
+ else
+ {
+ bp_cur_state = BP_NEXT_PATTERN;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_next_generation (void)
+{
+ int idx_layer, idx_cn; // idx_pn;
+
+ for (idx_layer = 0; idx_layer < (bp.arch.size) ; idx_layer++ )
+ { // loop through the layers
+ for (idx_cn = 0; idx_cn < (bp.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop through the neurons in the current layer
+ // clean the error
+ bp.arch.layers[idx_layer].neurons[idx_cn].error = 0.0;
+ //if (idx_layer >0)
+ //{ // clean temp. step weight weights
+ // for (idx_pn = 0; idx_pn <= (bp.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ // { // loop through the connect weights of the current neurons
+ // bp.arch.layers[idx_layer].neurons[idx_cn].temp_delta_w[idx_pn] = 0.0;
+ // }
+ //}
+ }
+ }
+
+ bp.mse /= bp.env.max_tra_pat;
+ print_mse();
+
+ //bp.env.cur_gen++;
+
+ if ((++bp.env.cur_gen) < bp.env.max_gen) // add error criterion later
+ {
+ bp.mse = 0.0; //clear mean squared error
+ bp_cur_state = BP_UPDATE_LEARNING_RATE;
+ }
+ else
+ {
+ bp_cur_state = BP_TRAINING_DONE;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_update_learning_rate (void)
+{
+ bp_cur_state = BP_UPDATE_MOMENTUM_RATE;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_update_momentum_rate (void)
+{
+ bp.env.cur_pat = 0;
+ bp_cur_state = BP_GET_PATTERN;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_training_done (void)
+{
+
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void bp_recall_done (void)
+{
+
+}
+
+/************************************************************/
+/* neuron activation functions */
+/************************************************************/
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float activate_function(float sum, int index)
+{
+ float res;
+ switch (index)
+ {
+ case NN_LINEAR_FUNCTION:
+ res = nn_linear_function(sum);
+ break;
+ case NN_GAUSIAN_FUNCTION:
+ res = nn_gausian_function(sum);
+ break;
+ case NN_SIGMOID_FUNCTION:
+ res = nn_sigmoid_function(sum);
+ break;
+ default:
+ break;
+ }
+ return (res);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float nn_linear_function (float sum)
+{
+ float res;
+
+ res = sum;
+ return (res);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float nn_gausian_function (float sum)
+{
+ float res;
+
+ return (res);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float nn_sigmoid_function (float sum)
+{
+ float res;
+ res = (float)sigmoid(sum);
+ return (res);
+}
diff --git a/bp/bp.h b/bp/bp.h
new file mode 100644
index 0000000..8ef701d
--- /dev/null
+++ b/bp/bp.h
@@ -0,0 +1,118 @@
+#ifndef BP_NN_H
+#define BP_NN_H
+
+#include "nnet.h"
+
+/**************************************************************/
+/* Constants and Macros */
+/**************************************************************/
+
+/**************************************************************/
+/* Enumerations */
+/**************************************************************/
+
+// Listing 6.2 Enumeration data type for BP net
+typedef enum BP_Training_Mode_Tag
+{
+ NN_BATCH_MODE,
+ NN_SEQUENTIAL_MODE,
+ NUM_NN_TRAINING_MODES
+} BP_Training_Mode_Type;
+
+typedef enum BP_State_Tag
+{
+ BP_GET_PATTERN,
+ BP_FEEDFORWARD_INPUT,
+ BP_FEEDFORWARD_HIDDEN,
+ BP_FEEDFORWARD_OUTPUT,
+ BP_BACK_PROPAGATION_OUTPUT,
+ BP_BACK_PROPAGATION_HIDDENS,
+ BP_BATCH_TEMP_WEIGHT_STEP_CHANGE,
+ BP_NEXT_PATTERN,
+ BP_WEIGHT_STEP_CHANGE,
+ BP_WEIGHT_CHANGE,
+ BP_NEXT_GENERATION,
+ BP_UPDATE_LEARNING_RATE,
+ BP_UPDATE_MOMENTUM_RATE,
+ BP_TRAINING_DONE,
+ BP_RECALL_DONE,
+ NUM_BP_STATES
+} BP_State_Type;
+
+// Listing 6.3 Structure data type definitions for BP net
+/**************************************************************/
+/* Structures */
+/**************************************************************/
+typedef struct Neuron_Type_Tag
+{
+ NN_Function_Type neuron_function; // neuron function
+ float in; // neuron input
+ float out; // neuron output
+ double error; // error of neuron's output
+ FVECTOR delta_w; // step change of weights
+ FVECTOR temp_delta_w; // temp. step change of weights
+ FVECTOR w; // connection weights from the previous layers
+} Neuron_Type;
+
+typedef struct NN_Layer_Arch_Type_Tag
+{
+ int size; // number of neurons in the layer
+ Neuron_Type *neurons; // pointer to the array of the neurons
+ NN_Layer_Type layer_type;
+} NN_Layer_Arch_Type;
+
+typedef struct BP_Arch_Type_Tag
+{
+ int size; // number of layers
+ NN_Layer_Arch_Type *layers; // pointer to the layers
+ int *hidden_number; // pointer to the neuron numbers of hidden layers
+} BP_Arch_Type;
+
+typedef struct BP_Env_Type_Tag
+{
+ NN_Operation_Mode_Type operation_mode; // training or recall
+ BP_Training_Mode_Type train_mode; // training mode if in training operation mode
+ float alpha; // learning rate 0.075
+ float gama; // momentum rate 0.15
+ float criterion; // training error criterion for termination
+ int max_gen; // maximum number of generations
+ int cur_gen; // current generation index
+ int max_tra_pat; // total number of training patterns
+ int cur_pat; // current training pattern index
+
+} BP_Env_Type;
+
+typedef struct BP_Type_Tag
+{
+ BP_Arch_Type arch;
+ BP_Env_Type env;
+ double mse; // mean squared error
+} BP_Type;
+
+typedef struct BP_Pattern_Set_Type_Tag
+{
+ int size; // number of patterns
+ int dim_in; // input dimension
+ int dim_out; // output dimension
+ FMATRIX patterns; // pointer to the array of in/out patterns
+} BP_Pattern_Set_Type;
+
+/**************************************************************/
+/* Function Prototypes for functions with file level scope */
+/**************************************************************/
+
+
+/**************************************************************/
+/* Const Variable with global level scope */
+/**************************************************************/
+
+
+/**************************************************************/
+/* Function Prototypes for functions with global level scope */
+/**************************************************************/
+
+extern void BP_Main_Loop(void);
+extern void BP_Start_Up(char *dataFile);
+extern void BP_Clean_Up(void);
+
+#endif
diff --git a/bp/definiti.h b/bp/definiti.h
new file mode 100644
index 0000000..e35e944
--- /dev/null
+++ b/bp/definiti.h
@@ -0,0 +1,14 @@
+#ifndef DEFINITION_H
+#define DEFINITION_H
+
+typedef enum BOOLEAN_Tag {FALSE, TRUE} BOOLEAN;
+
+typedef int *P_INT;
+typedef P_INT IVECTOR;
+typedef P_INT *IMATRIX;
+
+typedef float *P_FLOAT;
+typedef P_FLOAT FVECTOR;
+typedef P_FLOAT *FMATRIX;
+
+#endif
diff --git a/bp/headfile.h b/bp/headfile.h
new file mode 100644
index 0000000..cca8e6e
--- /dev/null
+++ b/bp/headfile.h
@@ -0,0 +1,10 @@
+#ifndef __HEADFILE_H__
+#define __HEADFILE_H__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <string.h>
+#include <time.h> //YS 01/16/98
+
+#endif
diff --git a/bp/iris_bp.run b/bp/iris_bp.run
new file mode 100644
index 0000000..d293e86
--- /dev/null
+++ b/bp/iris_bp.run
@@ -0,0 +1,14 @@
+0
+0
+0.075
+0.15
+0.01
+10000
+99
+
+3
+4
+150
+4
+3
+iris.dat
diff --git a/bp/main.c b/bp/main.c
new file mode 100644
index 0000000..244e73d
--- /dev/null
+++ b/bp/main.c
@@ -0,0 +1,49 @@
+#include "headfile.h"
+#include "bp.h"
+#include "mem_loc.h"
+
+/**************************************/
+/* Constants and Macros */
+/**************************************/
+#define NUM_RUN 50
+
+static void main_start_up(char *dataFile);
+static void main_clean_up(void);
+
+// Listing 6.4 Back-propagation main() routine
+int main (int argc, char *argv[])
+{
+ int idx_i;
+ // global variable definitions
+
+
+ // check command line
+ if (argc != 2)
+ {
+ printf("Usage: exe_file run_file");
+ exit(1);
+ }
+
+ // initialize
+ main_start_up(argv[1]);
+
+ // run
+ for (idx_i = 0; idx_i < NUM_RUN ; idx_i++)
+ {
+ BP_Main_Loop();
+ }
+ // clean up memory space
+ main_clean_up();
+ return 0;
+}
+
+static void main_start_up (char *dataFile)
+{
+ BP_Start_Up(dataFile);
+
+}
+
+static void main_clean_up (void)
+{
+ BP_Clean_Up();
+}
diff --git a/bp/mem_loc.c b/bp/mem_loc.c
new file mode 100644
index 0000000..81d35c4
--- /dev/null
+++ b/bp/mem_loc.c
@@ -0,0 +1,98 @@
+#include "headfile.h"
+#include "mem_loc.h"
+
+/**********************************************************************
+ If you want to allocate a block larger than 64K, you must use
+ farcalloc instead of calloc
+**********************************************************************/
+
+/* Memory allocation functions for integer matrix and vector */
+
+void IVectorAllocate(IVECTOR *ivector, int nCols)
+{
+ if ((*ivector = (IVECTOR) calloc(nCols, sizeof(long int))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for vector\n");
+ exit(1);
+ }
+}
+
+/* Allocate space for columns (int cells) for
+ dynamic two dimensional matrix[rows][cols]
+*/
+
+void IAllocateCols(P_INT imatrix[], int nRows, int nCols)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ IVectorAllocate(&imatrix[i], nCols);
+}
+
+/* Allocate space for a two dimensional dynamic matrix [rows] [cols]
+*/
+
+void IMatrixAllocate(IMATRIX *ipmatrix, int nRows, int nCols)
+{
+ if ( (*ipmatrix = (IMATRIX) calloc(nRows, sizeof(long int) ) ) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for matrix\n");
+ exit(1);
+ }
+
+ IAllocateCols(*ipmatrix, nRows, nCols);
+}
+
+/* free space for two dimensional dynamic array */
+void IMatrixFree(IMATRIX imatrix, int nRows)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ free(imatrix[i]);
+ free(imatrix);
+}
+
+/* *************** Float routines *************** */
+
+void FVectorAllocate(FVECTOR *fvector, int nCols)
+{
+ if ((*fvector = (FVECTOR) calloc(nCols, sizeof(float))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for fvector\n");
+ exit(1);
+ }
+}
+
+/* Allocate space for columns (int cells) for
+ dynamic two dimensional matrix[rows][cols]
+*/
+
+void FAllocateCols(P_FLOAT fmatrix[], int nRows, int nCols)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ FVectorAllocate(&fmatrix[i], nCols);
+}
+
+/* Allocate space for a two dimensional dynamic matrix [rows] [cols]
+*/
+
+void FMatrixAllocate(FMATRIX *fpmatrix, int nRows, int nCols)
+{
+ if ( (*fpmatrix = (FMATRIX) calloc(nRows, sizeof(float))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for fmatrix\n");
+ exit(1);
+ }
+
+ FAllocateCols(*fpmatrix, nRows, nCols);
+}
+
+/* free space for two dimensional dynamic array */
+void FMatrixFree(FMATRIX fmatrix, int nRows)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ free(fmatrix[i]);
+ free(fmatrix);
+}
+
diff --git a/bp/mem_loc.h b/bp/mem_loc.h
new file mode 100644
index 0000000..e79b2cd
--- /dev/null
+++ b/bp/mem_loc.h
@@ -0,0 +1,17 @@
+#ifndef __MEM_LOC_H__
+#define __MEM_LOC_H__
+
+#include "definiti.h"
+
+extern void IVectorAllocate(IVECTOR *ivector, int nCols);
+extern void IMatrixFree(IMATRIX imatrix, int nRows);
+extern void IMatrixAllocate(IMATRIX *ipmatrix, int nRows, int nCols);
+extern void IAllocateCols(P_INT imatrix[], int nRows, int nCols);
+
+extern void FVectorAllocate(FVECTOR *fvector, int nCols);
+extern void FMatrixFree(FMATRIX fmatrix, int nRows);
+extern void FMatrixAllocate(FMATRIX *fpmatrix, int nRows, int nCols);
+extern void FAllocateCols(P_FLOAT fmatrix[], int nRows, int nCols);
+
+#endif
+
diff --git a/bp/nnet.h b/bp/nnet.h
new file mode 100644
index 0000000..969f50a
--- /dev/null
+++ b/bp/nnet.h
@@ -0,0 +1,34 @@
+#ifndef NEURAL_NET_H
+#define NEURAL_NET_H
+
+#include "definiti.h"
+
+/**************************************************************/
+/* Enumerations */
+/**************************************************************/
+
+// Listing 6.1 Enumeration data type definitions for neural networks
+typedef enum NN_Operation_Mode_Type_Tag
+{
+ NN_TRAINING,
+ NN_RECALL,
+ NUM_BP_OPERATION_MODES
+} NN_Operation_Mode_Type;
+
+typedef enum NN_Function_Type_Tag
+{
+ NN_LINEAR_FUNCTION,
+ NN_GAUSIAN_FUNCTION,
+ NN_SIGMOID_FUNCTION,
+ NUM_NN_FUNCTION_TYPES
+} NN_Function_Type;
+
+typedef enum NN_Layer_Type_Tag
+{
+ NN_INPUT_LAYER,
+ NN_HIDDEN_LAYER,
+ NN_OUTPUT_LAYER,
+ NUM_NN_LAYERS
+} NN_Layer_Type;
+
+#endif
diff --git a/bp/sigmoid.c b/bp/sigmoid.c
new file mode 100644
index 0000000..3c3683e
--- /dev/null
+++ b/bp/sigmoid.c
@@ -0,0 +1,16 @@
+#include "math.h"
+#include "sigmoid.h"
+
+#define Beta 1
+
+double
+sigmoid(double x)
+{
+ x=-1*Beta*x;
+ x=exp(x);
+ x+=1.0;
+ x=1/x;
+ return(x);
+}
+
+
diff --git a/bp/sigmoid.h b/bp/sigmoid.h
new file mode 100644
index 0000000..5e06274
--- /dev/null
+++ b/bp/sigmoid.h
@@ -0,0 +1,6 @@
+#ifndef __SIGMOID_H_
+#define __SIGMOID_H_
+
+double sigmoid(double x);
+
+#endif
|
btbytes/ci
|
0dfcb36ad39d89be32495db294d5f0b67bc1b58a
|
adding a sample run file
|
diff --git a/psos/psos.run b/psos/psos.run
new file mode 100644
index 0000000..ee399fc
--- /dev/null
+++ b/psos/psos.run
@@ -0,0 +1,54 @@
+2
+1
+300
+
+
+0
+0
+1
+1
+0.0
+50.0
+10
+100
+100
+30
+13
+0.9
+1
+0 1.0
+0 1.0
+0 1.0
+0 1.0
+0 1.0
+0 1.0
+0 1.0
+0 1.0
+0 1.0
+0 100.0
+0 100.0
+0 100.0
+0 1.0
+
+1
+7
+1
+1
+0.0
+1.0
+0.5
+1
+70
+20
+9
+0.9
+1
+0 1.0
+0 1.0
+0 1.0
+0 1.0
+0 1.0
+0 1.0
+0 1.0
+0 1.0
+0 1.0
|
btbytes/ci
|
4428916c6188692ada0df9f5941a70647fcc9862
|
added newline
|
diff --git a/psos/main.c b/psos/main.c
index 419d0c7..b771ceb 100644
--- a/psos/main.c
+++ b/psos/main.c
@@ -1,49 +1,49 @@
#include "headfile.h"
#include "psostate.h"
#include "mem_loc.h"
/**************************************/
/* Constants and Macros */
/**************************************/
#define NUM_RUN 1
static void main_start_up(char *dataFile);
static void main_clean_up(void);
// Listing 4.17 PSO main() routine
int main (int argc, char *argv[])
{
int idx_i;
// global variable definitions
// check command line
if (argc != 2)
{
- printf("Usage: exe_file run_file");
+ printf("Usage: exe_file run_file\n");
exit(1);
}
// initialize
main_start_up(argv[1]);
// run PSO
for (idx_i = 0; idx_i < NUM_RUN ; idx_i++)
{
PSO_Main_Loop();
}
// clean up memory space
main_clean_up();
return 0;
}
static void main_start_up (char *dataFile)
{
PSO_Start_Up(dataFile);
}
static void main_clean_up (void)
{
PSO_Clean_Up();
}
|
btbytes/ci
|
b24779facf0abcdb023d07822aa1cd8e3ea531ac
|
removed conio.h refs
|
diff --git a/lvq/headfile.h b/lvq/headfile.h
index af5c635..cca8e6e 100644
--- a/lvq/headfile.h
+++ b/lvq/headfile.h
@@ -1,11 +1,10 @@
#ifndef __HEADFILE_H__
#define __HEADFILE_H__
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
-#include <conio.h>
#include <time.h> //YS 01/16/98
#endif
diff --git a/lvq/main.c b/lvq/main.c
index 8d08e6b..3466081 100644
--- a/lvq/main.c
+++ b/lvq/main.c
@@ -1,42 +1,42 @@
#include "headfile.h"
#include "lvq.h"
#include "mem_loc.h"
/**************************************/
/* Constants and Macros */
/**************************************/
static void main_start_up(char *dataFile);
static void main_clean_up(void);
// Listing 6.9 LVQ main() routine
void main (int argc, char *argv[])
{
// check command line
if (argc != 2)
{
- printf("Usage: exe_file run_file");
+ printf("Usage: exe_file run_file\n");
exit(1);
}
// initialize
main_start_up(argv[1]);
// run
LVQ_Main_Loop();
// clean up memory space
main_clean_up();
}
static void main_start_up (char *dataFile)
{
LVQ_Start_Up(dataFile);
}
static void main_clean_up (void)
{
LVQ_Clean_Up();
}
|
btbytes/ci
|
f4b1fdb36865243e8608af7308b4548330f4f160
|
adding lvq files
|
diff --git a/lvq/Makefile b/lvq/Makefile
new file mode 100644
index 0000000..d74bab6
--- /dev/null
+++ b/lvq/Makefile
@@ -0,0 +1,3 @@
+all: definiti.h headfile.h lvq.h mem_loc.h nnet.h
+ gcc -Wall -lm lvq.c main.c mem_loc.c -o lvq
+
diff --git a/lvq/definiti.h b/lvq/definiti.h
new file mode 100644
index 0000000..e35e944
--- /dev/null
+++ b/lvq/definiti.h
@@ -0,0 +1,14 @@
+#ifndef DEFINITION_H
+#define DEFINITION_H
+
+typedef enum BOOLEAN_Tag {FALSE, TRUE} BOOLEAN;
+
+typedef int *P_INT;
+typedef P_INT IVECTOR;
+typedef P_INT *IMATRIX;
+
+typedef float *P_FLOAT;
+typedef P_FLOAT FVECTOR;
+typedef P_FLOAT *FMATRIX;
+
+#endif
diff --git a/lvq/headfile.h b/lvq/headfile.h
new file mode 100644
index 0000000..af5c635
--- /dev/null
+++ b/lvq/headfile.h
@@ -0,0 +1,11 @@
+#ifndef __HEADFILE_H__
+#define __HEADFILE_H__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <string.h>
+#include <conio.h>
+#include <time.h> //YS 01/16/98
+
+#endif
diff --git a/lvq/lvq.c b/lvq/lvq.c
new file mode 100644
index 0000000..43f07b1
--- /dev/null
+++ b/lvq/lvq.c
@@ -0,0 +1,1012 @@
+#include "headfile.h"
+#include "definiti.h"
+#include "lvq.h"
+#include "mem_loc.h"
+
+#define MAX_NUM_CHARS 100
+
+#define SAVE_CLUSTER 1
+
+/**************************************************************/
+/* Static Variable and Const Variable with File level scope */
+/**************************************************************/
+ static LVQ_Type lvq;
+ static LVQ_Pattern_Set_Type patset;
+ static LVQ_State_Type lvq_cur_state;
+
+ #if SAVE_CLUSTER
+ static IMATRIX cluster_result;
+ #endif
+
+ static char pat_file_name[MAX_NUM_CHARS];
+ static float *target_out;
+ static IMATRIX test_result;
+/**************************************************************/
+/* Function Prototypes for functions with file level scope */
+/**************************************************************/
+static void read_lvq_parameters(char *dataFile); // read LVQ parameters from I/O file
+static void allocate_lvq_memory(void); // allocate LVQ memory spaces
+static void free_lvq_memory(void); // free LVQ memory spaces
+static void store_lvq_results(void); // write LVQ results to I/O file
+
+static void lvq_initialize(void);
+static void lvq_initialize_weights(void);
+static void lvq_state_handler(int); // LVQ state handle routine
+
+static void lvq_get_pattern(void);
+static void lvq_weight_normalization(void);
+static void lvq_feedforward_input(void);
+static void lvq_feedforward_output(void);
+static void lvq_winning_neuron(void);
+static void lvq_weight_step_change(void);
+static void lvq_weight_change(void);
+static void lvq_next_pattern(void);
+static void lvq_next_iteration(void);
+static void lvq_update_learning_rate(void);
+static void lvq_update_conscience_factor(void);
+static void lvq_training_done(void);
+static void lvq_recall_done(void);
+
+static float activate_function(float, int);
+static float nn_linear_function(float);
+static float nn_gausian_function(float);
+static float nn_sigmoid_function(float);
+
+static void print_net_parameters(void);
+static void update_recall_result(void);
+
+/**************************************************************/
+/* Function Definitions */
+/**************************************************************/
+
+
+/**************************************************************/
+/* LVQ Start and clean routines and interfaces */
+/**************************************************************/
+
+/**************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+***************************************************************/
+
+void LVQ_Start_Up (char *dataFile)
+{
+ read_lvq_parameters(dataFile);
+ allocate_lvq_memory(); // allocate memory for LVQ
+ lvq_initialize();
+}
+
+/*************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+void LVQ_Clean_Up (void)
+{
+ store_lvq_results();
+ free_lvq_memory();
+}
+
+
+/************************************************************/
+/* LVQ functons */
+/************************************************************/
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+void LVQ_Main_Loop (void)
+{
+ BOOLEAN running;
+
+ running = TRUE;
+ while (running)
+ {
+ if ((lvq_cur_state == LVQ_TRAINING_DONE) || (lvq_cur_state == LVQ_RECALL_DONE) )
+ {
+ running = FALSE;
+ }
+ lvq_state_handler(lvq_cur_state);
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void read_lvq_parameters (char *dataFile)
+{
+ FILE *frun;
+
+ // open the runfile to input parameters
+ if ((frun=fopen(dataFile,"r"))==NULL)
+ {
+ printf("Cant read run file");
+ exit(1);
+ }
+
+ // read LVQ's parameters from run file
+ // read LVQ's environment data
+ fscanf(frun, "%d",&(lvq.env.operation_mode)); // training or recall
+ fscanf(frun, "%d",&(lvq.env.train_mode)); // training mode if in training operation mode
+ fscanf(frun, "%f",&(lvq.env.eta)); // learning rate
+ fscanf(frun, "%f",&(lvq.env.shrink)); // learning rate shrinking coefficient
+ fscanf(frun, "%f",&(lvq.env.gama)); // bias factor
+ fscanf(frun, "%f",&(lvq.env.beta)); // beta
+ fscanf(frun, "%f",&(lvq.env.criterion)); // training error criterion for termination
+ fscanf(frun, "%d",&(lvq.env.max_ite)); // maximum number of generations
+ fscanf(frun, "%d",&(lvq.env.max_tra_pat)); // total number of training patterns
+ fscanf(frun, "%d",&(lvq.env.conscience)); // 0: no conscience, 1: conscience
+ fscanf(frun, "%d",&(lvq.env.no_clusters)); // number of clusters
+
+ // read LVQ's Arch
+
+ // read in/out pattern parameters
+ fscanf(frun, "%d",&(patset.size)); // number of pattern
+ // read pattern input dimension
+ fscanf(frun, "%d",&(patset.dim_in)); // input dimension
+ // read pattern output dimension
+ fscanf(frun, "%d",&(patset.dim_out)); // output dimension
+ // read pattern file name
+ fscanf(frun, "%s", pat_file_name); // pattern file name
+
+ fclose(frun);
+
+ print_net_parameters();
+}
+
+static void print_net_parameters (void)
+{
+ // test
+ printf( "%d\n",(lvq.env.operation_mode)); // training or recall
+ printf( "%d\n",(lvq.env.train_mode));
+ printf( "%f\n",(lvq.env.eta)); // learning rate
+ printf( "%f\n",(lvq.env.shrink)); // learning rate shrinking coefficient
+ printf( "%f\n",(lvq.env.gama)); // bias factor
+ printf( "%f\n",(lvq.env.beta)); //
+ printf( "%f\n",(lvq.env.criterion)); // training error criterion for termination
+ printf( "%d\n",(lvq.env.max_ite)); // maximum number of generations
+ printf( "%d\n",(lvq.env.max_tra_pat)); // total number of training patterns
+ printf( "%d\n",(lvq.env.conscience));
+ printf( "%d\n",(lvq.env.no_clusters));
+
+ printf( "%d\n",(patset.size)); // number of pattern
+ // read pattern input dimension
+ printf( "%d\n",(patset.dim_in)); // input dimension
+ // read pattern output dimension
+ printf( "%d\n",(patset.dim_out)); // output dimension
+ // read pattern file name
+ printf( "%s\n", pat_file_name);
+}
+
+static void update_recall_result(void)
+{
+ int idx_out;
+
+ for (idx_out = 0; idx_out < patset.dim_out; idx_out++)
+ { // loop throught the output of the target pattern
+ if (target_out[idx_out] > 0.1)
+ {
+ #if SAVE_CLUSTER
+ cluster_result[lvq.env.cur_pat][0] = idx_out;
+ #endif
+ test_result[lvq.env.winner][idx_out]++;
+ }
+ }
+
+ #if SAVE_CLUSTER
+ cluster_result[lvq.env.cur_pat][1] = lvq.env.winner;
+ #endif
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void allocate_lvq_memory (void)
+{
+ int idx;
+
+ // allocate memory for LVQ net
+ lvq.arch.size = 2;
+ lvq.arch.layers = calloc(lvq.arch.size,sizeof(NN_Layer_Arch_Type));
+
+ //allocate memory for input layer
+ lvq.arch.layers[0].size = patset.dim_in;
+ lvq.arch.layers[0].neurons = calloc(patset.dim_in,sizeof(Neuron_Type));
+ lvq.arch.layers[0].layer_type = NN_INPUT_LAYER;
+ // specify and allocate memory for neurons of input layer
+ for (idx =0 ; idx < lvq.arch.layers[0].size ; idx++)
+ {
+ lvq.arch.layers[0].neurons[idx].neuron_function = NN_LINEAR_FUNCTION;
+ FVectorAllocate(&(lvq.arch.layers[0].neurons[idx].delta_w),1);
+ FVectorAllocate(&(lvq.arch.layers[0].neurons[idx].w),1);
+ }
+
+ // allocate memory for output layer
+ lvq.arch.layers[lvq.arch.size - 1].size = lvq.env.no_clusters;
+ lvq.arch.layers[lvq.arch.size - 1].neurons = calloc(lvq.env.no_clusters,sizeof(Neuron_Type));
+ lvq.arch.layers[lvq.arch.size - 1].layer_type = NN_OUTPUT_LAYER;
+ // specify and allocate memory for neurons of output layer
+ for (idx =0 ; idx < lvq.arch.layers[lvq.arch.size - 1].size ; idx++)
+ {
+ lvq.arch.layers[lvq.arch.size - 1].neurons[idx].neuron_function = NN_LINEAR_FUNCTION;
+ lvq.arch.layers[lvq.arch.size - 1].neurons[idx].c_f = 1.0/lvq.env.no_clusters;
+ lvq.arch.layers[lvq.arch.size - 1].neurons[idx].b_v = 0.0;
+ lvq.arch.layers[lvq.arch.size - 1].neurons[idx].w_s = 0; // no winner at beginning
+ FVectorAllocate(&(lvq.arch.layers[lvq.arch.size - 1].neurons[idx].delta_w),lvq.arch.layers[lvq.arch.size - 2].size);
+ FVectorAllocate(&(lvq.arch.layers[lvq.arch.size - 1].neurons[idx].w),lvq.arch.layers[lvq.arch.size - 2].size);
+ }
+
+ // allocate memory for pattern set
+ FMatrixAllocate(&(patset.patterns),patset.size,(patset.dim_in + patset.dim_out));
+
+ #if SAVE_CLUSTER
+ // allocate memory for storing cluster information for each pattern
+ if (lvq.env.operation_mode == NN_RECALL)
+ { // allocate for test result storage
+ IMatrixAllocate(&(cluster_result),patset.size,2);
+ }
+ #endif
+
+ // allocate memory for target output
+ target_out = calloc(patset.dim_out,sizeof(float));
+
+ if (lvq.env.operation_mode == NN_RECALL)
+ { // allocate for test result storage
+ IMatrixAllocate(&(test_result),lvq.env.no_clusters,patset.dim_out);
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void free_lvq_memory (void)
+{
+ int idx;
+ if (lvq.env.operation_mode == NN_RECALL)
+ { // allocate for test result storage
+ IMatrixFree(test_result,lvq.env.no_clusters);
+ #if SVAE_CLUSTER
+ ImatrixFree(cluster_result,patset.size);
+ #endif
+ }
+ // free memory for pattern set
+ FMatrixFree(patset.patterns,patset.size);
+
+ // free memory for LVQ net
+ // free memory for output layer
+ for (idx =0 ; idx < lvq.arch.layers[lvq.arch.size - 1].size ; idx++)
+ {
+ free(lvq.arch.layers[lvq.arch.size - 1].neurons[idx].w);
+ free(lvq.arch.layers[lvq.arch.size - 1].neurons[idx].delta_w);
+ }
+ free(lvq.arch.layers[lvq.arch.size - 1].neurons);
+
+ // free memory for input layer
+ for (idx =0 ; idx < lvq.arch.layers[0].size ; idx++)
+ {
+ free(lvq.arch.layers[0].neurons[idx].w);
+ free(lvq.arch.layers[0].neurons[idx].delta_w);
+ }
+ free(lvq.arch.layers[0].neurons);
+
+ free(lvq.arch.layers);
+
+ free(target_out);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_initialize (void)
+{
+ int idx_i,idx_j;
+ int stime;
+ long ltime;
+ FILE *frun;
+
+ // get the current calendar time
+ ltime = time(NULL);
+ stime = (unsigned) ltime/2;
+ srand(stime);
+
+ // lvq read patterns from file
+ // open the pattern file to obtain in/out patterns
+ if ((frun=fopen(pat_file_name,"r"))==NULL)
+ {
+ printf("Cant read pattern file");
+ exit(1);
+ }
+
+ for (idx_i = 0; idx_i < patset.size; idx_i++)
+ {
+ for (idx_j = 0; idx_j < (patset.dim_in + patset.dim_out) ; idx_j++)
+ {
+ fscanf(frun, "%f",&(patset.patterns[idx_i][idx_j]));
+ }
+ }
+ fclose(frun);
+
+ lvq.env.winner = 0;
+ // lvq randomly initialize weights
+ lvq_initialize_weights();
+
+ // lvq initial setting
+ lvq_cur_state = LVQ_GET_PATTERN;
+ lvq.env.cur_ite = 0;
+ lvq.env.cur_pat = 0;
+ lvq.env.pat_counter = 0;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_initialize_weights (void)
+{
+ int idx_layer,idx_neuron,idx_weight;
+ float prob;
+ int idx_cn, idx_pn;
+ FILE *fout;
+
+ if (lvq.env.operation_mode == NN_TRAINING)
+ {
+ // initialize weight for the neurons in the output layer
+ for (idx_weight = 0; idx_weight < (lvq.arch.layers[lvq.arch.size - 2].size); idx_weight++ )
+ {
+ for (idx_neuron = 0; idx_neuron < (lvq.arch.layers[lvq.arch.size - 1].size); idx_neuron++)
+ {
+ prob = (rand()%1000)/1000.0;
+ lvq.arch.layers[lvq.arch.size - 1].neurons[idx_neuron].w[idx_weight] = prob; // (0,1)
+ lvq.arch.layers[lvq.arch.size - 1].neurons[idx_neuron].delta_w[idx_weight] = 0.0;
+ }
+ }
+ }
+ else
+ { // RECALL operation mode
+ // read in weights from file
+ // open file for write
+ if ((fout=fopen("LVQ_res.txt","r"))==NULL)
+ {
+ printf("Cant open file for reading LVQ training results");
+ exit(1);
+ }
+ for (idx_layer = 1; idx_layer < (lvq.arch.size) ; idx_layer++ )
+ { // loop through the layers
+ for (idx_cn = 0; idx_cn < (lvq.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop through the neurons in the current layer
+ for (idx_pn = 0; idx_pn < (lvq.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ fscanf(fout,"%f",&(lvq.arch.layers[idx_layer].neurons[idx_cn].w[idx_pn]));
+ }
+ }
+ }
+ fclose(fout);
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void store_lvq_results (void)
+{
+ // store weights
+ int idx_layer, idx_cn, idx_pn;
+ FILE *fout;
+
+ // open file for write
+
+ if (lvq.env.operation_mode == NN_TRAINING)
+ {
+ if ((fout=fopen("LVQ_res.txt","w"))==NULL)
+ {
+ printf("Cant open file for writing LVQ weights");
+ exit(1);
+ }
+ for (idx_layer = 1; idx_layer < (lvq.arch.size) ; idx_layer++ )
+ { // loop through the layers
+ for (idx_cn = 0; idx_cn < (lvq.arch.layers[idx_layer].size) ; idx_cn++)
+ { // loop through the neurons in the current layer
+ for (idx_pn = 0; idx_pn < (lvq.arch.layers[idx_layer - 1].size) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ fprintf(fout,"%f\n",lvq.arch.layers[idx_layer].neurons[idx_cn].w[idx_pn]);
+ }
+ fprintf(fout,"\n");
+ }
+ }
+ fclose(fout);
+ }
+ else // RECALL
+ {
+ if ((fout=fopen("LVQ_test.txt","w"))==NULL)
+ {
+ printf("Cant open file for writing LVQ test results");
+ exit(1);
+ }
+ fprintf(fout,"\n");
+
+ for (idx_pn = 0; idx_pn < patset.dim_out; idx_pn++ )
+ {
+ fprintf(fout,"\tClass %d",idx_pn);
+ }
+ fprintf(fout,"\n-----------------------------------------------------\n");
+
+ for (idx_cn = 0; idx_cn < lvq.env.no_clusters; idx_cn++ )
+ {
+ fprintf(fout,"%d\t", idx_cn);
+ for (idx_pn = 0; idx_pn < patset.dim_out; idx_pn++ )
+ {
+ fprintf(fout,"%d\t",test_result[idx_cn][idx_pn]);
+ }
+ fprintf(fout,"\n");
+ }
+ fprintf(fout,"\n");
+ #if SAVE_CLUSTER
+ fprintf(fout,"\n\--------------------------------------------------------\n");
+ fprintf(fout,"\tcluster number for each pattern\n");
+ fprintf(fout,"\nindex\ttarget\tcluster no. \n");
+ for (idx_cn = 0; idx_cn < patset.size; idx_cn++)
+ {
+ fprintf(fout,"%d\t%d\t%d\n",idx_cn, cluster_result[idx_cn][0], cluster_result[idx_cn][1]);
+ }
+ fprintf(fout,"\n");
+ #endif
+ fclose(fout);
+ }
+}
+
+/************************************************************/
+/* LVQ State Handlers */
+/************************************************************/
+
+// Listing 6.10 Main part of the LVQ state machine
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_state_handler (int state_index)
+{
+ switch (state_index)
+ {
+ case LVQ_GET_PATTERN:
+ lvq_get_pattern();
+ break;
+ case LVQ_WEIGHT_NORMALIZATION:
+ lvq_weight_normalization();
+ break;
+ case LVQ_FEEDFORWARD_INPUT:
+ lvq_feedforward_input();
+ break;
+ case LVQ_FEEDFORWARD_OUTPUT:
+ lvq_feedforward_output();
+ break;
+ case LVQ_WINNING_NEURON:
+ lvq_winning_neuron();
+ break;
+ case LVQ_WEIGHT_STEP_CHANGE:
+ lvq_weight_step_change();
+ break;
+ case LVQ_WEIGHT_CHANGE:
+ lvq_weight_change();
+ break;
+ case LVQ_NEXT_PATTERN:
+ lvq_next_pattern();
+ break;
+ case LVQ_NEXT_ITERATION:
+ lvq_next_iteration();
+ break;
+ case LVQ_UPDATE_LEARNING_RATE:
+ lvq_update_learning_rate();
+ break;
+ case LVQ_UPDATE_CONSCIENCE_FACTOR:
+ lvq_update_conscience_factor();
+ break;
+ case LVQ_TRAINING_DONE:
+ lvq_training_done();
+ break;
+ case LVQ_RECALL_DONE:
+ lvq_recall_done();
+ break;
+ default:
+ break;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_get_pattern (void)
+{
+ int idx;
+
+ for (idx = 0; idx < (lvq.arch.layers[0].size); idx++)
+ {
+ lvq.arch.layers[0].neurons[idx].in = patset.patterns[lvq.env.cur_pat][idx];
+ }
+ for (idx = 0; idx < patset.dim_out; idx++ )
+ {
+ target_out[idx] = patset.patterns[lvq.env.cur_pat][patset.dim_in + idx];
+ //printf("%d: %f\n",lvq.env.cur_pat, target_out[idx]);
+ }
+ if (lvq.env.operation_mode == NN_TRAINING)
+ {
+ lvq_cur_state = LVQ_WEIGHT_NORMALIZATION;
+ }
+ else
+ {
+ lvq_cur_state = LVQ_FEEDFORWARD_INPUT;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_weight_normalization (void)
+{
+ int idx_cn, idx_pn;
+ double sum;
+ float temp_f;
+
+ for (idx_cn = 0; idx_cn < (lvq.arch.layers[1].size) ; idx_cn++)
+ { // loop through neurons in the output layer
+ sum = 0.0;
+ for (idx_pn = 0; idx_pn < (lvq.arch.layers[0].size) ; idx_pn++ )
+ { // loop through all the weights connected to this neuron
+ sum += lvq.arch.layers[1].neurons[idx_cn].w[idx_pn] * lvq.arch.layers[1].neurons[idx_cn].w[idx_pn];
+ }
+ sum = sqrt(sum);
+ if (sum > 0.0)
+ {
+ for (idx_pn = 0; idx_pn < (lvq.arch.layers[0].size) ; idx_pn++ )
+ { // loop through all the weights connected to this neuron
+ temp_f = lvq.arch.layers[1].neurons[idx_cn].w[idx_pn]/sum;
+ lvq.arch.layers[1].neurons[idx_cn].w[idx_pn] = temp_f;
+ }
+ }
+ }
+ lvq_cur_state = LVQ_FEEDFORWARD_INPUT;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_feedforward_input(void)
+{
+ int idx;
+
+ for (idx = 0; idx < (lvq.arch.layers[0].size); idx++)
+ {
+ lvq.arch.layers[0].neurons[idx].out = lvq.arch.layers[0].neurons[idx].in;
+ }
+
+ lvq_cur_state = LVQ_FEEDFORWARD_OUTPUT;
+}
+
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_feedforward_output (void)
+{
+ int idx_out, idx_prev;
+ double sum, temp_f;
+ for (idx_out = 0; idx_out < (lvq.arch.layers[1].size ); idx_out++)
+ { // loop throught the neurons of the output layer
+ sum = 0.0;
+ for ( idx_prev = 0; idx_prev < (lvq.arch.layers[0].size ); idx_prev++)
+ { // loop through the neurons of the input layer
+ temp_f = (lvq.arch.layers[0].neurons[idx_prev].out - lvq.arch.layers[1].neurons[idx_out].w[idx_prev] );
+ sum += (temp_f * temp_f);
+ }
+ temp_f = sqrt(sum);
+ lvq.arch.layers[1].neurons[idx_out].in = temp_f;
+ lvq.arch.layers[1].neurons[idx_out].out = activate_function(temp_f,lvq.arch.layers[1].neurons[idx_out].neuron_function);
+ }
+ lvq_cur_state = LVQ_WINNING_NEURON;
+}
+
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_winning_neuron (void)
+{
+ int idx, temp_w;
+ float min_v = 1000.0;
+
+ for (idx = 0; idx < (lvq.arch.layers[1].size); idx++)
+ { // loop through the neurons in output layer
+ if ((lvq.arch.layers[1].neurons[idx].out - lvq.arch.layers[1].neurons[idx].b_v) < min_v)
+ {
+ min_v = lvq.arch.layers[1].neurons[idx].out - lvq.arch.layers[1].neurons[idx].b_v;
+ temp_w = idx;
+ }
+ }
+ lvq.arch.layers[1].neurons[lvq.env.winner].w_s = 0; // clear the old winner status
+ lvq.env.winner = temp_w; // assign new winner
+ lvq.arch.layers[1].neurons[lvq.env.winner].w_s = 1; // set new winner status
+
+ if (lvq.env.operation_mode == NN_TRAINING)
+ {
+ lvq_cur_state = LVQ_WEIGHT_STEP_CHANGE;
+ }
+ else
+ { // recall
+ update_recall_result();
+ lvq_cur_state = LVQ_NEXT_PATTERN;
+ }
+
+
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_next_pattern (void)
+{
+ if (lvq.env.operation_mode == NN_TRAINING)
+ {
+ if (lvq.env.train_mode == LVQ_RANDOM_MODE)
+ { // random training
+ lvq.env.cur_pat = rand()%(lvq.env.max_tra_pat); // next random pattern index
+ }
+ else
+ { // sequential training
+ if (++lvq.env.cur_pat >= lvq.env.max_tra_pat)
+ {
+ lvq.env.cur_pat = 0;
+ }
+ }
+ if ((++lvq.env.pat_counter) <lvq.env.max_tra_pat)
+ { // add other termination criterion here
+ lvq_cur_state = LVQ_UPDATE_LEARNING_RATE;
+ }
+ else
+ {
+ lvq_cur_state = LVQ_NEXT_ITERATION;
+ }
+ }
+ else // recall
+ {
+ if ((++lvq.env.cur_pat) < patset.size)
+ {
+ lvq_cur_state = LVQ_GET_PATTERN;
+ }
+ else
+ {
+ lvq_cur_state = LVQ_RECALL_DONE;
+ }
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_next_iteration (void)
+{
+ lvq.env.pat_counter = 0;
+ if ((++lvq.env.cur_ite) < lvq.env.max_ite)
+ { // add other termination criterion here
+ lvq_cur_state = LVQ_UPDATE_LEARNING_RATE;
+ }
+ else
+ {
+ lvq.env.pat_counter = 0;
+ lvq_cur_state = LVQ_TRAINING_DONE;
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_weight_step_change (void)
+{
+ int idx_pn;
+
+ for (idx_pn = 0; idx_pn < (lvq.arch.layers[0].size) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ lvq.arch.layers[1].neurons[lvq.env.winner].delta_w[idx_pn] = lvq.arch.layers[0].neurons[idx_pn].out - lvq.arch.layers[1].neurons[lvq.env.winner].w[idx_pn];
+ lvq.arch.layers[1].neurons[lvq.env.winner].delta_w[idx_pn] *= lvq.env.eta;
+ }
+ lvq_cur_state = LVQ_WEIGHT_CHANGE;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_weight_change (void)
+{
+ int idx_pn;
+
+ for (idx_pn = 0; idx_pn < (lvq.arch.layers[0].size) ; idx_pn++)
+ { // loop through the connect weights of the current neurons
+ lvq.arch.layers[1].neurons[lvq.env.winner].w[idx_pn] += lvq.arch.layers[1].neurons[lvq.env.winner].delta_w[idx_pn];
+ }
+
+ lvq_cur_state = LVQ_NEXT_PATTERN;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_update_learning_rate (void)
+{
+ lvq.env.eta *= lvq.env.shrink;
+ lvq_cur_state = LVQ_UPDATE_CONSCIENCE_FACTOR;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_update_conscience_factor (void)
+{
+ int idx;
+ float temp_f;
+ if (lvq.env.conscience == LVQ_CONSCIENCE)
+ {
+ for (idx = 0; idx < (lvq.arch.layers[1].size); idx++)
+ { // loop through the neurons in output layer
+ temp_f = lvq.arch.layers[1].neurons[idx].c_f;
+ lvq.arch.layers[1].neurons[idx].c_f = temp_f + lvq.env.beta * (lvq.arch.layers[1].neurons[idx].w_s - temp_f);
+ lvq.arch.layers[1].neurons[idx].b_v = lvq.env.gama * (1.0/lvq.env.no_clusters - lvq.arch.layers[1].neurons[idx].c_f );
+ }
+ }
+ lvq_cur_state = LVQ_GET_PATTERN;
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_training_done (void)
+{
+ lvq_weight_normalization();
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void lvq_recall_done (void)
+{
+
+}
+
+/************************************************************/
+/* neuron activation functions */
+/************************************************************/
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float activate_function(float sum, int index)
+{
+ float res;
+ switch (index)
+ {
+ case NN_LINEAR_FUNCTION:
+ res = nn_linear_function(sum);
+ break;
+ case NN_GAUSIAN_FUNCTION:
+ res = nn_gausian_function(sum);
+ break;
+ case NN_SIGMOID_FUNCTION:
+ res = nn_sigmoid_function(sum);
+ break;
+ default:
+ break;
+ }
+ return (res);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float nn_linear_function (float sum)
+{
+ float res;
+
+ res = sum;
+ return (res);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float nn_gausian_function (float sum)
+{
+ float res;
+ res = sum; // No Guassian in LVQ
+ return (res);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static float nn_sigmoid_function (float sum)
+{
+ float res;
+ res = sum; // No sigmoid in LVQ
+ return (res);
+}
+
diff --git a/lvq/lvq.h b/lvq/lvq.h
new file mode 100644
index 0000000..79e1559
--- /dev/null
+++ b/lvq/lvq.h
@@ -0,0 +1,127 @@
+#ifndef LVQ_NN_H
+#define LVQ_NN_H
+
+#include "nnet.h"
+
+/**************************************************************/
+/* Constants and Macros */
+/**************************************************************/
+
+// Listing 6.6 Enumeration data types for the LVQ network
+/**************************************************************/
+/* Enumerations */
+/**************************************************************/
+typedef enum LVQ_Training_Mode_Tag
+{
+ LVQ_RANDOM_MODE,
+ LVQ_SEQUENTIAL_MODE,
+ NUM_LVQ_TRAINING_MODES
+} LVQ_Training_Mode_Type;
+
+
+typedef enum LVQ_State_Tag
+{
+ LVQ_GET_PATTERN,
+ LVQ_WEIGHT_NORMALIZATION,
+ LVQ_FEEDFORWARD_INPUT,
+ LVQ_FEEDFORWARD_OUTPUT,
+ LVQ_WINNING_NEURON,
+ LVQ_WEIGHT_STEP_CHANGE,
+ LVQ_WEIGHT_CHANGE,
+ LVQ_NEXT_PATTERN,
+ LVQ_NEXT_ITERATION,
+ LVQ_UPDATE_LEARNING_RATE,
+ LVQ_UPDATE_CONSCIENCE_FACTOR,
+ LVQ_TRAINING_DONE,
+ LVQ_RECALL_DONE,
+ NUM_LVQ_STATES
+} LVQ_State_Type;
+
+typedef enum LVQ_Conscience_Type_Tag
+{
+ LVQ_NO_CONSCIENCE,
+ LVQ_CONSCIENCE,
+ NUM_LVQ_CONSCIENCE
+} LVQ_Conscience_Type;
+
+// Listing 6.7 Structure data types for the LVQ netowrk
+/**************************************************************/
+/* Structures */
+/**************************************************************/
+typedef struct Neuron_Type_Tag
+{
+ NN_Function_Type neuron_function; // neuron function
+ float in; // neuron input
+ float out; // neuron output, euclidean distance for output neuron
+ FVECTOR w; // connection weights from the previous layers
+ FVECTOR delta_w; // step change of weights
+ float c_f; // conscience factor
+ float b_v; // bias value
+ int w_s; // winner status, y in equation
+} Neuron_Type;
+
+typedef struct NN_Layer_Arch_Type_Tag
+{
+ int size; // number of neurons in the layer
+ Neuron_Type *neurons; // pointer to the array of the neurons
+ NN_Layer_Type layer_type;
+} NN_Layer_Arch_Type;
+
+typedef struct LVQ_Arch_Type_Tag
+{
+ int size; // number of layers
+ NN_Layer_Arch_Type *layers; // pointer to the layers
+} LVQ_Arch_Type;
+
+typedef struct LVQ_Env_Type_Tag
+{
+ NN_Operation_Mode_Type operation_mode; // training or recall
+ LVQ_Training_Mode_Type train_mode; // training mode if in training operation mode
+ float eta; // learning rate
+ float gama; // bias factor
+ float beta; //
+ float shrink; // learning rate (eta) shrinking coefficient
+ float criterion; // training criterion for termination
+ int max_ite; // maximum number of iterations
+ int cur_ite; // current iteration index
+ int max_tra_pat; // total number of training patterns
+ int cur_pat; // current training pattern index
+ int pat_counter; // pattern index increase counter within a iteration
+ LVQ_Conscience_Type conscience; // 0: no conscience, 1: conscience
+ int winner; // index of winning neuron
+ int no_clusters; // number of clusters
+} LVQ_Env_Type;
+
+typedef struct LVQ_Type_Tag
+{
+ LVQ_Arch_Type arch;
+ LVQ_Env_Type env;
+} LVQ_Type;
+
+typedef struct LVQ_Pattern_Set_Type_Tag
+{
+ int size; // number of patterns
+ int dim_in; // input dimension
+ int dim_out; // output dimension
+ FMATRIX patterns; // pointer to the array of in/out patterns
+} LVQ_Pattern_Set_Type;
+
+/**************************************************************/
+/* Function Prototypes for functions with file level scope */
+/**************************************************************/
+
+
+/**************************************************************/
+/* Const Variable with global level scope */
+/**************************************************************/
+
+
+/**************************************************************/
+/* Function Prototypes for functions with global level scope */
+/**************************************************************/
+
+extern void LVQ_Main_Loop(void);
+extern void LVQ_Start_Up(char *dataFile);
+extern void LVQ_Clean_Up(void);
+
+#endif
diff --git a/lvq/main.c b/lvq/main.c
new file mode 100644
index 0000000..8d08e6b
--- /dev/null
+++ b/lvq/main.c
@@ -0,0 +1,42 @@
+#include "headfile.h"
+#include "lvq.h"
+#include "mem_loc.h"
+
+/**************************************/
+/* Constants and Macros */
+/**************************************/
+
+static void main_start_up(char *dataFile);
+static void main_clean_up(void);
+
+// Listing 6.9 LVQ main() routine
+void main (int argc, char *argv[])
+{
+ // check command line
+ if (argc != 2)
+ {
+ printf("Usage: exe_file run_file");
+ exit(1);
+ }
+
+ // initialize
+ main_start_up(argv[1]);
+
+ // run
+ LVQ_Main_Loop();
+
+ // clean up memory space
+ main_clean_up();
+
+}
+
+static void main_start_up (char *dataFile)
+{
+ LVQ_Start_Up(dataFile);
+
+}
+
+static void main_clean_up (void)
+{
+ LVQ_Clean_Up();
+}
diff --git a/lvq/mem_loc.c b/lvq/mem_loc.c
new file mode 100644
index 0000000..81d35c4
--- /dev/null
+++ b/lvq/mem_loc.c
@@ -0,0 +1,98 @@
+#include "headfile.h"
+#include "mem_loc.h"
+
+/**********************************************************************
+ If you want to allocate a block larger than 64K, you must use
+ farcalloc instead of calloc
+**********************************************************************/
+
+/* Memory allocation functions for integer matrix and vector */
+
+void IVectorAllocate(IVECTOR *ivector, int nCols)
+{
+ if ((*ivector = (IVECTOR) calloc(nCols, sizeof(long int))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for vector\n");
+ exit(1);
+ }
+}
+
+/* Allocate space for columns (int cells) for
+ dynamic two dimensional matrix[rows][cols]
+*/
+
+void IAllocateCols(P_INT imatrix[], int nRows, int nCols)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ IVectorAllocate(&imatrix[i], nCols);
+}
+
+/* Allocate space for a two dimensional dynamic matrix [rows] [cols]
+*/
+
+void IMatrixAllocate(IMATRIX *ipmatrix, int nRows, int nCols)
+{
+ if ( (*ipmatrix = (IMATRIX) calloc(nRows, sizeof(long int) ) ) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for matrix\n");
+ exit(1);
+ }
+
+ IAllocateCols(*ipmatrix, nRows, nCols);
+}
+
+/* free space for two dimensional dynamic array */
+void IMatrixFree(IMATRIX imatrix, int nRows)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ free(imatrix[i]);
+ free(imatrix);
+}
+
+/* *************** Float routines *************** */
+
+void FVectorAllocate(FVECTOR *fvector, int nCols)
+{
+ if ((*fvector = (FVECTOR) calloc(nCols, sizeof(float))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for fvector\n");
+ exit(1);
+ }
+}
+
+/* Allocate space for columns (int cells) for
+ dynamic two dimensional matrix[rows][cols]
+*/
+
+void FAllocateCols(P_FLOAT fmatrix[], int nRows, int nCols)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ FVectorAllocate(&fmatrix[i], nCols);
+}
+
+/* Allocate space for a two dimensional dynamic matrix [rows] [cols]
+*/
+
+void FMatrixAllocate(FMATRIX *fpmatrix, int nRows, int nCols)
+{
+ if ( (*fpmatrix = (FMATRIX) calloc(nRows, sizeof(float))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for fmatrix\n");
+ exit(1);
+ }
+
+ FAllocateCols(*fpmatrix, nRows, nCols);
+}
+
+/* free space for two dimensional dynamic array */
+void FMatrixFree(FMATRIX fmatrix, int nRows)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ free(fmatrix[i]);
+ free(fmatrix);
+}
+
diff --git a/lvq/mem_loc.h b/lvq/mem_loc.h
new file mode 100644
index 0000000..e79b2cd
--- /dev/null
+++ b/lvq/mem_loc.h
@@ -0,0 +1,17 @@
+#ifndef __MEM_LOC_H__
+#define __MEM_LOC_H__
+
+#include "definiti.h"
+
+extern void IVectorAllocate(IVECTOR *ivector, int nCols);
+extern void IMatrixFree(IMATRIX imatrix, int nRows);
+extern void IMatrixAllocate(IMATRIX *ipmatrix, int nRows, int nCols);
+extern void IAllocateCols(P_INT imatrix[], int nRows, int nCols);
+
+extern void FVectorAllocate(FVECTOR *fvector, int nCols);
+extern void FMatrixFree(FMATRIX fmatrix, int nRows);
+extern void FMatrixAllocate(FMATRIX *fpmatrix, int nRows, int nCols);
+extern void FAllocateCols(P_FLOAT fmatrix[], int nRows, int nCols);
+
+#endif
+
diff --git a/lvq/nnet.h b/lvq/nnet.h
new file mode 100644
index 0000000..343ff1e
--- /dev/null
+++ b/lvq/nnet.h
@@ -0,0 +1,32 @@
+#ifndef NEURAL_NET_H
+#define NEURAL_NET_H
+
+#include "definiti.h"
+
+/**************************************************************/
+/* Enumerations */
+/**************************************************************/
+typedef enum NN_Operation_Mode_Type_Tag
+{
+ NN_TRAINING,
+ NN_RECALL,
+ NUM_BP_OPERATION_MODES
+} NN_Operation_Mode_Type;
+
+typedef enum NN_Function_Type_Tag
+{
+ NN_LINEAR_FUNCTION,
+ NN_GAUSIAN_FUNCTION,
+ NN_SIGMOID_FUNCTION,
+ NUM_NN_FUNCTION_TYPES
+} NN_Function_Type;
+
+typedef enum NN_Layer_Type_Tag
+{
+ NN_INPUT_LAYER,
+ NN_HIDDEN_LAYER,
+ NN_OUTPUT_LAYER,
+ NUM_NN_LAYERS
+} NN_Layer_Type;
+
+#endif
|
btbytes/ci
|
ac7d68c4ae4540e2e6f595b938245f4145b9944b
|
fixed typo
|
diff --git a/psos/psostate.c b/psos/psostate.c
index abea4ff..cc5ac18 100644
--- a/psos/psostate.c
+++ b/psos/psostate.c
@@ -334,1618 +334,1618 @@ static void free_pso_memory (void)
*
* Parameters:
*
* Returns:
*
* Description:
*
*************************************************************/
static void pso_initialize (void)
{
int idx_i,idx_j;
int stime;
long ltime;
// get the current calendar time
ltime = time(NULL);
stime = (unsigned) ltime/2;
srand(stime);
for (idx_i = 0; idx_i <NUM_PSO ; idx_i++)
{
cur_pso = idx_i;
for (idx_j = 0; idx_j < (psos[cur_pso].popu_size) ; idx_j++ )
{
psos[cur_pso].popu_index = idx_j;
PSO_initialize_handler(psos[cur_pso].env_data.init_type);
}
}
cur_pso = 0; // done with initialization, move to the first PSO
PSO_current_state = PSO_EVALUATE; // move to the EVALUATE state
psos[cur_pso].popu_index = 0; // satrt with the first particle
psos[cur_pso].gene_index = 0; // start from the first generation of the first population
pso_cycle_index = 0; // start from the first cycle
}
/*************************************************/
/* PSO State Handlers */
/*************************************************/
// Listing 4.19 & 4.24 The PSO state handling routine
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void pso_state_handler (int state_index)
{
switch (state_index)
{
case PSO_UPDATE_INERTIA_WEIGHT:
PSO_update_inertia_weight();
break;
case PSO_EVALUATE:
PSO_evaluate();
break;
case PSO_UPDATE_GLOBAL_BEST:
PSO_update_global_best();
break;
case PSO_UPDATE_LOCAL_BEST:
PSO_update_local_best();
break;
case PSO_UPDTAE_VELOCITY:
PSO_update_velocity();
break;
case PSO_UPDATE_POSITION:
PSO_update_position();
break;
case PSO_GOAL_REACH_JUDGE:
PSO_goal_reach_judge();
break;
case PSO_NEXT_GENERATION:
PSO_next_generation();
break;
case PSO_UPDATE_PBEST_EACH_CYCLE:
PSO_update_pbest_each_cycle();
break;
case PSO_NEXT_PSO:
PSO_next_pso();
break;
case PSOS_DONE:
PSOs_done();
break;
default:
printf("wrong state index\n");
exit(1);
break;
}
}
// Listing 4.32 The PSO_UPDATE_INERTIA_WEIGHT state handle routine
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void PSO_update_inertia_weight (void)
{
iw_update_methods(psos[cur_pso].env_data.iw_method);
PSO_current_state = PSO_EVALUATE; // move to the next state
psos[cur_pso].popu_index = 0; // satrt with the first particle
}
// Listing 4.20 & 4.25 The PSO_evaluate() routine
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void PSO_evaluate (void)
{
if ((psos[cur_pso].popu_index) < (psos[cur_pso].popu_size))
{
evaluate_functions(psos[cur_pso].env_data.function_type);
PSO_current_state = PSO_UPDATE_LOCAL_BEST;
}
else // done with evaluation, move to the next state
{
PSO_current_state = PSO_GOAL_REACH_JUDGE ;
psos[cur_pso].popu_index = 0;
}
}
// Listing 4.26 The PSO_UPDATE_LOCAL_BEST state handle routine
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void PSO_update_local_best (void)
{
int idx_i;
// here update local best
if ( (psos[cur_pso].env_data.opti_type) == MINIMIZATION)
{ // minimization problem
if ( (pso_cycle_index == 1) && ((psos[cur_pso].gene_index) == 0 ) )
{
psos[cur_pso].global_best_index = 0;
psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value;
}
if ((psos[cur_pso].eva_fun_value) < (psos[cur_pso].pbest_values[psos[cur_pso].popu_index] ))
{
psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value;
for (idx_i = 0; idx_i < (psos[cur_pso].dimension) ;idx_i++ )
{
(psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index ][idx_i ] ) = (psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] );
}
}
}
else
{ // maximization problem
if ( (pso_cycle_index == 1) && ((psos[cur_pso].gene_index) == 0 ) )
{
psos[cur_pso].global_best_index = 0;
psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value;
}
if ((psos[cur_pso].eva_fun_value) > (psos[cur_pso].pbest_values[psos[cur_pso].popu_index] ))
{
psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value;
for (idx_i = 0; idx_i < (psos[cur_pso].dimension) ;idx_i++ )
{
(psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index ][idx_i ] ) = (psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] );
}
}
}
PSO_current_state = PSO_UPDATE_GLOBAL_BEST ;
}
// Listing 4.27 The PSO_UPDATE_GLOBAL_BEST state handle routine
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void PSO_update_global_best (void)
{
// here update global best
if ( (psos[cur_pso].env_data.opti_type) == MINIMIZATION)
{ // minimization problem
if ((psos[cur_pso].eva_fun_value) < (psos[cur_pso].pbest_values[psos[cur_pso].global_best_index ] ))
{
psos[cur_pso].global_best_index = psos[cur_pso].popu_index;
}
}
else
{ // maximization problem
if ((psos[cur_pso].eva_fun_value) > (psos[cur_pso].pbest_values[psos[cur_pso].global_best_index ] ))
{
psos[cur_pso].global_best_index = psos[cur_pso].popu_index;
}
}
PSO_current_state = PSO_UPDTAE_VELOCITY;
}
// Listing 4.28 The PSO_UPDATE_VELOCITY state handle routine
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void PSO_update_velocity (void)
{
int idx_i;
// here update velocity
for (idx_i = 0; idx_i < (psos[cur_pso].dimension) ;idx_i++ )
{
psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] =
(psos[cur_pso].inertia_weight) * (psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] )
+ 2*((rand()%1000)/1000.0) * (psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index ][idx_i ] - psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] )
+ 2*((rand()%1000)/1000.0) * (psos[cur_pso].pbest_position_values[psos[cur_pso].global_best_index ][idx_i ] - psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] );
if (psos[cur_pso].env_data.boundary_flag)
{
if ( fabs(psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] ) > (0.5*((psos[cur_pso].env_data.up_boundaries[idx_i]) - (psos[cur_pso].env_data.low_boundaries[idx_i] ))))
{
if ((psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] ) >= 0)
{
psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] = 0.5*((psos[cur_pso].env_data.up_boundaries[idx_i]) - (psos[cur_pso].env_data.low_boundaries[idx_i] ));
}
else
{
psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] = -0.5*((psos[cur_pso].env_data.up_boundaries[idx_i]) - (psos[cur_pso].env_data.low_boundaries[idx_i] ));
}
}
}
else
{
if ( (psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] ) > (psos[cur_pso].env_data.max_velocity ) )
{
psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] = psos[cur_pso].env_data.max_velocity;
}
else if ( (psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] ) < (-(psos[cur_pso].env_data.max_velocity ) ) )
{
psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] = -(psos[cur_pso].env_data.max_velocity );
}
}
}
PSO_current_state = PSO_UPDATE_POSITION; // go to the PSO_UPDATE_POSITION state
}
// Listing 4.29 The PSO_UPDATE_POSITION state handle routine
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void PSO_update_position (void)
{
int idx_i;
for (idx_i = 0; idx_i < (psos[cur_pso].dimension) ;idx_i++ )
{
psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] += psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ];
if (psos[cur_pso].env_data.boundary_flag)
{
if ((psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ]) < (psos[cur_pso].env_data.low_boundaries[idx_i] ))
{
psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] = psos[cur_pso].env_data.low_boundaries[idx_i] + ((psos[cur_pso].env_data.up_boundaries[idx_i] - psos[cur_pso].env_data.low_boundaries[idx_i] ) * ((rand()%1000)/100000.0)); // low boundary + noise
}
else if ((psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ]) > (psos[cur_pso].env_data.up_boundaries[idx_i]))
{
psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] = psos[cur_pso].env_data.up_boundaries[idx_i] - ((psos[cur_pso].env_data.up_boundaries[idx_i] - psos[cur_pso].env_data.low_boundaries[idx_i] ) * ((rand()%1000)/100000.0)); // up boundary - noise
}
}
}
PSO_current_state = PSO_EVALUATE; // go back to the PSO_EVALUATE state
(psos[cur_pso].popu_index)++;
}
// Listing 4.30 The PSO_GOAL_REACH_JUDGE state handle routine
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void PSO_goal_reach_judge (void)
{
PSO_current_state = PSO_NEXT_GENERATION;
}
// The PSO_NEXT_GENERATION state handle routine
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void PSO_next_generation (void)
{
if ((++(psos[cur_pso].gene_index)) < (psos[cur_pso].env_data.max_generation ) )
{ // next generation of the same population of PSO
PSO_current_state = PSO_UPDATE_INERTIA_WEIGHT;
}
else
{
if ( (++cur_pso ) >= NUM_PSO)
{ // end of the cycle
cur_pso = 0; // move to the first pso
}
PSO_current_state = PSO_UPDATE_PBEST_EACH_CYCLE; // move to the next state
psos[cur_pso].popu_index = 0;
}
}
// Listing 4.33 The PSO_UPDATE_PBEST_EACH_CYCLE state handle routine
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void PSO_update_pbest_each_cycle (void)
{
if (PSO_UPDATE_PBEST_EACH_CYCLE_FLAG)
{
pso_update_pbest_each_cycle_pending = TRUE;
if ((psos[cur_pso].popu_index) < (psos[cur_pso].popu_size))
{
evaluate_functions(psos[cur_pso].env_data.function_type);
psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value; // update pbest
psos[cur_pso].popu_index++;
}
else // done with evaluation, move to the next state
{
PSO_current_state = PSO_NEXT_PSO;
pso_update_pbest_each_cycle_pending = FALSE;
}
}
else
{
PSO_current_state = PSO_NEXT_PSO;
}
}
// Listing 4.34 The PSO_NEXT_PSO state handle routine
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void PSO_next_pso (void)
{
// next PSO
if ( cur_pso > 0)
{
PSO_current_state = PSO_EVALUATE; // move to the EVALUATE state for the next pso in the same cycle
}
else
{ // all the PSOs have been processed
PSO_current_state = PSOS_DONE; // end of the cycle
}
psos[cur_pso].popu_index = 0; // satrt with the first particle
psos[cur_pso].gene_index = 0; // satrt with the first particle
}
// Listing 4.35 The PSOS_DONE state handle routine
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void PSOs_done (void)
{
PSO_current_state = PSO_EVALUATE; // if start another cycle, start from PSO_EVALUATE
}
/*************************************************/
/* PSO Evaluation Functions */
/*************************************************/
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void evaluate_functions (int fun_type)
{
switch (fun_type)
{
case G1_MIN:
g1_min();
break;
case G1_MAX:
g1_max();
break;
case G7_MIN:
g7_min();
break;
case G7_MAX:
g7_max();
break;
case G9_MIN:
g9_min();
break;
case G9_MAX:
g9_max();
break;
case F6:
f6();
break;
case SPHERE:
sphere();
break;
case ROSENBROCK:
rosenbrock();
break;
case RASTRIGRIN:
rastrigrin();
break;
case GRIEWANK:
griewank();
break;
default:
printf("wrong function type\n");
exit(1);
break;
}
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void g1_min (void)
{
int idx_i,idx_j;
double x[13];
double y[9];
double r[9];
double p[9];
double fit_value;
int env_pso;
double temp_max,temp_value;
if ((psos[0].dimension != 13) || (psos[1].dimension != 9))
{
- printf("G1 wrong function dimention\n");
+ printf("G1 wrong function dimension\n");
exit(1);
}
env_pso = (cur_pso)?0:1;
if (pso_update_pbest_each_cycle_pending)
{
for (idx_i = 0; idx_i <13 ; idx_i++ )
{
x[idx_i] = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][idx_i];
}
}
else
{
for (idx_i = 0; idx_i <13 ; idx_i++ )
{
x[idx_i] = psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
}
}
// constrains
r[0] = 2 * x[0] + 2 * x[1] + x[9] + x[10] - 10;
r[1] = 2 * x[0] + 2 * x[2] + x[9] + x[11] - 10;
r[2] = 2 * x[1] + 2 * x[2] + x[10] + x[11] - 10;
r[3] = -8 * x[0] + x[9];
r[4] = -8 * x[1] + x[10];
r[5] = -8 * x[2] + x[11];
r[6] = -2 * x[3] - x[4] + x[9];
r[7] = -2 * x[5] - x[6] + x[10];
r[8] = -2 * x[7] - x[8] + x[11];
for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
{
if (pso_update_pbest_each_cycle_pending)
{
for (idx_j = 0; idx_j <9 ; idx_j++ )
{
y[idx_j] = psos[env_pso].pbest_position_values[idx_i][idx_j];
}
}
else
{
for (idx_j = 0; idx_j <9 ; idx_j++ )
{
y[idx_j] = psos[env_pso].position_values[idx_i][idx_j];
}
}
temp_value = 0.0;
for (idx_j = 0; idx_j <4 ; idx_j++ )
{
temp_value += 5 * (x[idx_j] - x[idx_j] * x[idx_j]);
}
for (idx_j = 4; idx_j <13 ; idx_j++ )
{
temp_value -= x[idx_j];
}
for (idx_j = 0; idx_j <9 ; idx_j++ )
{
if ((r[idx_j]) >= (-y[idx_j]/200.0))
{
p[idx_j] = y[idx_j] * r[idx_j] + 100 * r[idx_j] * r[idx_j];
}
else
{
p[idx_j] = - y[idx_j] * y[idx_j]/400.0;
}
}
for (idx_j = 0; idx_j <9 ; idx_j++ )
{
temp_value += p[idx_j];
}
if (idx_i == 0)
{
temp_max = temp_value;
fit_value = temp_max;
}
else
{
fit_value = max(temp_value,temp_max);
temp_max = fit_value;
}
}
psos[cur_pso].eva_fun_value = fit_value;
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
/*
 * g1_max: evaluate the G1 benchmark for the current swarm in the
 * co-evolutionary pair.  The current swarm holds the 9 multiplier values
 * (y); its opponent ("environment") swarm holds 13 decision variables (x).
 * Appears to implement an augmented-Lagrangian-style penalty
 * (p = y*r + 100*r^2 when the constraint is near-active) — confirm against
 * the accompanying text.  Fitness is the MINIMUM penalized objective over
 * all environment particles (assumes popu_size >= 1).
 *
 * Fix: resolved leftover diff-residue lines around the dimension-check
 * printf (kept the corrected "dimension" message).
 */
static void g1_max (void)
{
  int idx_i, idx_j;
  double x[13];            /* decision variables from the environment swarm */
  double y[9];             /* multipliers of the current swarm              */
  double r[9];             /* constraint values                             */
  double p[9];             /* per-constraint penalty terms                  */
  double fit_value;
  int env_pso;             /* index of the opposing swarm                   */
  double temp_min, temp_value;

  /* G1 needs a 13-dimensional primal swarm and a 9-dimensional multiplier swarm. */
  if ((psos[0].dimension != 13) || (psos[1].dimension != 9))
  {
    printf("G1 wrong function dimension\n");
    exit(1);
  }
  env_pso = (cur_pso) ? 0 : 1;

  /* Load this particle's multipliers (pbest or current position,
     depending on the update phase). */
  if (pso_update_pbest_each_cycle_pending)
  {
    for (idx_i = 0; idx_i < 9; idx_i++)
    {
      y[idx_i] = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][idx_i];
    }
  }
  else
  {
    for (idx_i = 0; idx_i < 9; idx_i++)
    {
      y[idx_i] = psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
    }
  }

  for (idx_i = 0; idx_i < (psos[env_pso].popu_size); idx_i++)
  {
    /* Decision variables of the idx_i-th environment particle. */
    if (pso_update_pbest_each_cycle_pending)
    {
      for (idx_j = 0; idx_j < 13; idx_j++)
      {
        x[idx_j] = psos[env_pso].pbest_position_values[idx_i][idx_j];
      }
    }
    else
    {
      for (idx_j = 0; idx_j < 13; idx_j++)
      {
        x[idx_j] = psos[env_pso].position_values[idx_i][idx_j];
      }
    }
    /* constraints */
    r[0] = 2 * x[0] + 2 * x[1] + x[9] + x[10] - 10;
    r[1] = 2 * x[0] + 2 * x[2] + x[9] + x[11] - 10;
    r[2] = 2 * x[1] + 2 * x[2] + x[10] + x[11] - 10;
    r[3] = -8 * x[0] + x[9];
    r[4] = -8 * x[1] + x[10];
    r[5] = -8 * x[2] + x[11];
    r[6] = -2 * x[3] - x[4] + x[9];
    r[7] = -2 * x[5] - x[6] + x[10];
    r[8] = -2 * x[7] - x[8] + x[11];

    /* G1 objective. */
    temp_value = 0.0;
    for (idx_j = 0; idx_j < 4; idx_j++)
    {
      temp_value += 5 * (x[idx_j] - x[idx_j] * x[idx_j]);
    }
    for (idx_j = 4; idx_j < 13; idx_j++)
    {
      temp_value -= x[idx_j];
    }

    /* Penalty terms driven by this particle's multipliers. */
    for (idx_j = 0; idx_j < 9; idx_j++)
    {
      if ((r[idx_j]) >= (-y[idx_j] / 200.0))
      {
        p[idx_j] = y[idx_j] * r[idx_j] + 100 * r[idx_j] * r[idx_j];
      }
      else
      {
        p[idx_j] = -y[idx_j] * y[idx_j] / 400.0;
      }
    }
    for (idx_j = 0; idx_j < 9; idx_j++)
    {
      temp_value += p[idx_j];
    }

    /* Track the minimum over all environment particles. */
    if (idx_i == 0)
    {
      temp_min = temp_value;
      fit_value = temp_min;
    }
    else
    {
      fit_value = min(temp_value, temp_min);
      temp_min = fit_value;
    }
  }
  psos[cur_pso].eva_fun_value = fit_value;
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
/*
 * g7_min: evaluate the G7 benchmark for the current swarm.  The current
 * swarm holds the 10 decision variables (x1..x10); the environment swarm
 * holds the 8 constraint multipliers (y1..y8).  Appears to implement an
 * augmented-Lagrangian-style penalty — confirm against the accompanying
 * text.  Fitness is the MAXIMUM penalized objective over all environment
 * particles (assumes popu_size >= 1).
 *
 * Fix: resolved leftover diff-residue lines around the dimension-check
 * printf (kept the corrected "dimension" message).
 */
static void g7_min (void)
{
  int idx_i;
  double x1, x2, x3, x4, x5, x6, x7, x8, x9, x10;   /* decision variables  */
  double y1, y2, y3, y4, y5, y6, y7, y8;            /* multipliers         */
  double r1, r2, r3, r4, r5, r6, r7, r8;            /* constraint values   */
  double p1, p2, p3, p4, p5, p6, p7, p8;            /* penalty terms       */
  double fit_value;
  int env_pso;
  double temp_max, temp_value;

  /* G7 needs a 10-dimensional primal swarm and an 8-dimensional multiplier swarm. */
  if ((psos[0].dimension != 10) || (psos[1].dimension != 8))
  {
    printf("G7 wrong function dimension\n");
    exit(1);
  }
  env_pso = (cur_pso) ? 0 : 1;

  /* Load this particle's decision variables (pbest or current position). */
  if (pso_update_pbest_each_cycle_pending)
  {
    x1 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][0];
    x2 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][1];
    x3 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][2];
    x4 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][3];
    x5 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][4];
    x6 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][5];
    x7 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][6];
    x8 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][7];
    x9 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][8];
    x10 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][9];
  }
  else
  {
    x1 = psos[cur_pso].position_values[psos[cur_pso].popu_index][0];
    x2 = psos[cur_pso].position_values[psos[cur_pso].popu_index][1];
    x3 = psos[cur_pso].position_values[psos[cur_pso].popu_index][2];
    x4 = psos[cur_pso].position_values[psos[cur_pso].popu_index][3];
    x5 = psos[cur_pso].position_values[psos[cur_pso].popu_index][4];
    x6 = psos[cur_pso].position_values[psos[cur_pso].popu_index][5];
    x7 = psos[cur_pso].position_values[psos[cur_pso].popu_index][6];
    x8 = psos[cur_pso].position_values[psos[cur_pso].popu_index][7];
    x9 = psos[cur_pso].position_values[psos[cur_pso].popu_index][8];
    x10 = psos[cur_pso].position_values[psos[cur_pso].popu_index][9];
  }
  /* restrictions (G7 constraints, negated so r > 0 means violated) */
  r1 = -(105 - 4 * x1 - 5 * x2 + 3 * x7 - 9 * x8);
  r2 = -(-3 * pow(x1 - 2, 2) - 4 * pow(x2 - 3, 2) - 2 * x3 * x3 + 7 * x4 + 120);
  r3 = -(-10 * x1 + 8 * x2 + 17 * x7 - 2 * x8);
  r4 = -(-x1 * x1 - 2 * pow(x2 - 2, 2) + 2 * x1 * x2 - 14 * x5 + 6 * x6);
  r5 = -(8 * x1 - 2 * x2 - 5 * x9 + 2 * x10 + 12);
  r6 = -(-5 * x1 * x1 - 8 * x2 - pow(x3 - 6, 2) + 2 * x4 + 40);
  r7 = -(3 * x1 - 6 * x2 - 12 * pow(x9 - 8, 2) + 7 * x10);
  r8 = -(-0.5 * pow(x1 - 8, 2) - 2 * (x2 - 4) - 3 * x5 * x5 + x6 + 30);

  for (idx_i = 0; idx_i < (psos[env_pso].popu_size); idx_i++)
  {
    /* Multipliers of the idx_i-th environment particle. */
    if (pso_update_pbest_each_cycle_pending)
    {
      y1 = psos[env_pso].pbest_position_values[idx_i][0];
      y2 = psos[env_pso].pbest_position_values[idx_i][1];
      y3 = psos[env_pso].pbest_position_values[idx_i][2];
      y4 = psos[env_pso].pbest_position_values[idx_i][3];
      y5 = psos[env_pso].pbest_position_values[idx_i][4];
      y6 = psos[env_pso].pbest_position_values[idx_i][5];
      y7 = psos[env_pso].pbest_position_values[idx_i][6];
      y8 = psos[env_pso].pbest_position_values[idx_i][7];
    }
    else
    {
      y1 = psos[env_pso].position_values[idx_i][0];
      y2 = psos[env_pso].position_values[idx_i][1];
      y3 = psos[env_pso].position_values[idx_i][2];
      y4 = psos[env_pso].position_values[idx_i][3];
      y5 = psos[env_pso].position_values[idx_i][4];
      y6 = psos[env_pso].position_values[idx_i][5];
      y7 = psos[env_pso].position_values[idx_i][6];
      y8 = psos[env_pso].position_values[idx_i][7];
    }
    /* G7 objective. */
    temp_value = x1 * x1 + x2 * x2 + x1 * x2 - 14 * x1 - 16 * x2 + pow(x3 - 10, 2)
               + 4 * pow(x4 - 5, 2) + pow(x5 - 3, 2) + 2 * pow(x6 - 1, 2) + 5 * x7 * x7
               + 7 * pow(x8 - 11, 2) + 2 * pow(x9 - 10, 2) + pow(x10 - 7, 2) + 45;
    /* One penalty term per constraint. */
    if ((r1) >= (-y1 / 200.0))
    {
      p1 = y1 * r1 + 100 * r1 * r1;
    }
    else
    {
      p1 = -y1 * y1 / 400.0;
    }
    if ((r2) >= (-y2 / 200.0))
    {
      p2 = y2 * r2 + 100 * r2 * r2;
    }
    else
    {
      p2 = -y2 * y2 / 400.0;
    }
    if ((r3) >= (-y3 / 200.0))
    {
      p3 = y3 * r3 + 100 * r3 * r3;
    }
    else
    {
      p3 = -y3 * y3 / 400.0;
    }
    if ((r4) >= (-y4 / 200.0))
    {
      p4 = y4 * r4 + 100 * r4 * r4;
    }
    else
    {
      p4 = -y4 * y4 / 400.0;
    }
    if ((r5) >= (-y5 / 200.0))
    {
      p5 = y5 * r5 + 100 * r5 * r5;
    }
    else
    {
      p5 = -y5 * y5 / 400.0;
    }
    if ((r6) >= (-y6 / 200.0))
    {
      p6 = y6 * r6 + 100 * r6 * r6;
    }
    else
    {
      p6 = -y6 * y6 / 400.0;
    }
    if ((r7) >= (-y7 / 200.0))
    {
      p7 = y7 * r7 + 100 * r7 * r7;
    }
    else
    {
      p7 = -y7 * y7 / 400.0;
    }
    if ((r8) >= (-y8 / 200.0))
    {
      p8 = y8 * r8 + 100 * r8 * r8;
    }
    else
    {
      p8 = -y8 * y8 / 400.0;
    }
    temp_value += p1 + p2 + p3 + p4 + p5 + p6 + p7 + p8;
    /* temp_value += (y1*r1 +y2*r2 +y3*r3 +y4*r4 +y5*r5 +y6*r6 +y7*r7+y8*r8); */

    /* Track the maximum over all environment particles. */
    if (idx_i == 0)
    {
      temp_max = temp_value;
      fit_value = temp_max;
    }
    else
    {
      fit_value = max(temp_value, temp_max);
      temp_max = fit_value;
    }
  }
  psos[cur_pso].eva_fun_value = fit_value;
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
/*
 * g7_max: evaluate the G7 benchmark for the multiplier swarm.  The current
 * swarm holds the 8 constraint multipliers (y1..y8); the environment swarm
 * holds the 10 decision variables (x1..x10), re-read for every environment
 * particle.  Fitness is the MINIMUM penalized objective over all environment
 * particles (assumes popu_size >= 1).
 *
 * Fix: resolved leftover diff-residue lines around the dimension-check
 * printf (kept the corrected "dimension" message).
 */
static void g7_max (void)
{
  int idx_i;
  double x1, x2, x3, x4, x5, x6, x7, x8, x9, x10;   /* decision variables  */
  double y1, y2, y3, y4, y5, y6, y7, y8;            /* multipliers         */
  double r1, r2, r3, r4, r5, r6, r7, r8;            /* constraint values   */
  double p1, p2, p3, p4, p5, p6, p7, p8;            /* penalty terms       */
  double fit_value;
  int env_pso;
  double temp_min, temp_value;

  /* G7 needs a 10-dimensional primal swarm and an 8-dimensional multiplier swarm. */
  if ((psos[0].dimension != 10) || (psos[1].dimension != 8))
  {
    printf("G7 wrong function dimension\n");
    exit(1);
  }
  env_pso = (cur_pso) ? 0 : 1;

  /* Load this particle's multipliers (pbest or current position). */
  if (pso_update_pbest_each_cycle_pending)
  {
    y1 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][0];
    y2 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][1];
    y3 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][2];
    y4 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][3];
    y5 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][4];
    y6 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][5];
    y7 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][6];
    y8 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][7];
  }
  else
  {
    y1 = psos[cur_pso].position_values[psos[cur_pso].popu_index][0];
    y2 = psos[cur_pso].position_values[psos[cur_pso].popu_index][1];
    y3 = psos[cur_pso].position_values[psos[cur_pso].popu_index][2];
    y4 = psos[cur_pso].position_values[psos[cur_pso].popu_index][3];
    y5 = psos[cur_pso].position_values[psos[cur_pso].popu_index][4];
    y6 = psos[cur_pso].position_values[psos[cur_pso].popu_index][5];
    y7 = psos[cur_pso].position_values[psos[cur_pso].popu_index][6];
    y8 = psos[cur_pso].position_values[psos[cur_pso].popu_index][7];
  }

  for (idx_i = 0; idx_i < (psos[env_pso].popu_size); idx_i++)
  {
    /* Decision variables of the idx_i-th environment particle. */
    if (pso_update_pbest_each_cycle_pending)
    {
      x1 = psos[env_pso].pbest_position_values[idx_i][0];
      x2 = psos[env_pso].pbest_position_values[idx_i][1];
      x3 = psos[env_pso].pbest_position_values[idx_i][2];
      x4 = psos[env_pso].pbest_position_values[idx_i][3];
      x5 = psos[env_pso].pbest_position_values[idx_i][4];
      x6 = psos[env_pso].pbest_position_values[idx_i][5];
      x7 = psos[env_pso].pbest_position_values[idx_i][6];
      x8 = psos[env_pso].pbest_position_values[idx_i][7];
      x9 = psos[env_pso].pbest_position_values[idx_i][8];
      x10 = psos[env_pso].pbest_position_values[idx_i][9];
    }
    else
    {
      x1 = psos[env_pso].position_values[idx_i][0];
      x2 = psos[env_pso].position_values[idx_i][1];
      x3 = psos[env_pso].position_values[idx_i][2];
      x4 = psos[env_pso].position_values[idx_i][3];
      x5 = psos[env_pso].position_values[idx_i][4];
      x6 = psos[env_pso].position_values[idx_i][5];
      x7 = psos[env_pso].position_values[idx_i][6];
      x8 = psos[env_pso].position_values[idx_i][7];
      x9 = psos[env_pso].position_values[idx_i][8];
      x10 = psos[env_pso].position_values[idx_i][9];
    }
    /* G7 constraints (negated so r > 0 means violated). */
    r1 = -(105 - 4 * x1 - 5 * x2 + 3 * x7 - 9 * x8);
    r2 = -(-3 * pow(x1 - 2, 2) - 4 * pow(x2 - 3, 2) - 2 * x3 * x3 + 7 * x4 + 120);
    r3 = -(-10 * x1 + 8 * x2 + 17 * x7 - 2 * x8);
    r4 = -(-x1 * x1 - 2 * pow(x2 - 2, 2) + 2 * x1 * x2 - 14 * x5 + 6 * x6);
    r5 = -(8 * x1 - 2 * x2 - 5 * x9 + 2 * x10 + 12);
    r6 = -(-5 * x1 * x1 - 8 * x2 - pow(x3 - 6, 2) + 2 * x4 + 40);
    r7 = -(3 * x1 - 6 * x2 - 12 * pow(x9 - 8, 2) + 7 * x10);
    r8 = -(-0.5 * pow(x1 - 8, 2) - 2 * (x2 - 4) - 3 * x5 * x5 + x6 + 30);
    /* G7 objective. */
    temp_value = x1 * x1 + x2 * x2 + x1 * x2 - 14 * x1 - 16 * x2 + pow(x3 - 10, 2)
               + 4 * pow(x4 - 5, 2) + pow(x5 - 3, 2) + 2 * pow(x6 - 1, 2) + 5 * x7 * x7
               + 7 * pow(x8 - 11, 2) + 2 * pow(x9 - 10, 2) + pow(x10 - 7, 2) + 45;
    /* One penalty term per constraint. */
    if ((r1) >= (-y1 / 200.0))
    {
      p1 = y1 * r1 + 100 * r1 * r1;
    }
    else
    {
      p1 = -y1 * y1 / 400.0;
    }
    if ((r2) >= (-y2 / 200.0))
    {
      p2 = y2 * r2 + 100 * r2 * r2;
    }
    else
    {
      p2 = -y2 * y2 / 400.0;
    }
    if ((r3) >= (-y3 / 200.0))
    {
      p3 = y3 * r3 + 100 * r3 * r3;
    }
    else
    {
      p3 = -y3 * y3 / 400.0;
    }
    if ((r4) >= (-y4 / 200.0))
    {
      p4 = y4 * r4 + 100 * r4 * r4;
    }
    else
    {
      p4 = -y4 * y4 / 400.0;
    }
    if ((r5) >= (-y5 / 200.0))
    {
      p5 = y5 * r5 + 100 * r5 * r5;
    }
    else
    {
      p5 = -y5 * y5 / 400.0;
    }
    if ((r6) >= (-y6 / 200.0))
    {
      p6 = y6 * r6 + 100 * r6 * r6;
    }
    else
    {
      p6 = -y6 * y6 / 400.0;
    }
    if ((r7) >= (-y7 / 200.0))
    {
      p7 = y7 * r7 + 100 * r7 * r7;
    }
    else
    {
      p7 = -y7 * y7 / 400.0;
    }
    if ((r8) >= (-y8 / 200.0))
    {
      p8 = y8 * r8 + 100 * r8 * r8;
    }
    else
    {
      p8 = -y8 * y8 / 400.0;
    }
    temp_value += p1 + p2 + p3 + p4 + p5 + p6 + p7 + p8;
    /* temp_value += (y1*r1 +y2*r2 +y3*r3 +y4*r4 +y5*r5 +y6*r6 +y7*r7+y8*r8); */

    /* Track the minimum over all environment particles. */
    if (idx_i == 0)
    {
      temp_min = temp_value;
      fit_value = temp_min;
    }
    else
    {
      fit_value = min(temp_value, temp_min);
      temp_min = fit_value;
    }
  }
  psos[cur_pso].eva_fun_value = fit_value;
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
/*
 * g9_min: evaluate the G9 benchmark for the current swarm.  The current
 * swarm holds the 7 decision variables (x); the environment swarm holds the
 * 4 constraint multipliers (y).  Fitness is the MAXIMUM penalized objective
 * over all environment particles (assumes popu_size >= 1).
 *
 * Fix: resolved leftover diff-residue lines around the dimension-check
 * printf (kept the corrected "dimension" message).
 */
static void g9_min (void)
{
  int idx_i, idx_j;
  double x[7];             /* decision variables of the current swarm     */
  double y[4];             /* multipliers from the environment swarm      */
  double r[4];             /* constraint values                           */
  double p[4];             /* penalty terms                               */
  double fit_value;
  int env_pso;
  double temp_max, temp_value;

  /* G9 needs a 7-dimensional primal swarm and a 4-dimensional multiplier swarm. */
  if ((psos[0].dimension != 7) || (psos[1].dimension != 4))
  {
    printf("G9 wrong function dimension\n");
    exit(1);
  }
  env_pso = (cur_pso) ? 0 : 1;

  /* Load this particle's decision variables (pbest or current position). */
  if (pso_update_pbest_each_cycle_pending)
  {
    for (idx_i = 0; idx_i < 7; idx_i++)
    {
      x[idx_i] = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][idx_i];
    }
  }
  else
  {
    for (idx_i = 0; idx_i < 7; idx_i++)
    {
      x[idx_i] = psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
    }
  }
  /* constraints */
  r[0] = 2 * x[0] * x[0] + 3 * pow(x[1], 4) + x[2] + 4 * x[3] * x[3] + 5 * x[4] - 127;
  r[1] = 7 * x[0] + 3 * x[1] + 10 * x[2] * x[2] + x[3] - x[4] - 282;
  r[2] = 23 * x[0] + x[1] * x[1] + 6 * x[5] * x[5] - 8 * x[6] - 196;
  r[3] = 4 * x[0] * x[0] + x[1] * x[1] - 3 * x[0] * x[1] + 2 * x[2] * x[2] + 5 * x[5] - 11 * x[6];

  for (idx_i = 0; idx_i < (psos[env_pso].popu_size); idx_i++)
  {
    /* Multipliers of the idx_i-th environment particle. */
    if (pso_update_pbest_each_cycle_pending)
    {
      for (idx_j = 0; idx_j < 4; idx_j++)
      {
        y[idx_j] = psos[env_pso].pbest_position_values[idx_i][idx_j];
      }
    }
    else
    {
      for (idx_j = 0; idx_j < 4; idx_j++)
      {
        y[idx_j] = psos[env_pso].position_values[idx_i][idx_j];
      }
    }
    /* G9 objective. */
    temp_value = pow((x[0] - 10), 2) + 5 * pow((x[1] - 12), 2) + pow(x[2], 4) + 3 * pow((x[3] - 11), 2)
               + 10 * pow((x[4]), 6) + 7 * pow(x[5], 2) + pow(x[6], 4) - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6];
    /* Penalty terms, accumulated in the same loop. */
    for (idx_j = 0; idx_j < 4; idx_j++)
    {
      if ((r[idx_j]) >= (-y[idx_j] / 200.0))
      {
        p[idx_j] = y[idx_j] * r[idx_j] + 100 * r[idx_j] * r[idx_j];
      }
      else
      {
        p[idx_j] = -y[idx_j] * y[idx_j] / 400.0;
      }
      temp_value += p[idx_j];
    }
    /* Track the maximum over all environment particles. */
    if (idx_i == 0)
    {
      temp_max = temp_value;
      fit_value = temp_max;
    }
    else
    {
      fit_value = max(temp_value, temp_max);
      temp_max = fit_value;
    }
  }
  psos[cur_pso].eva_fun_value = fit_value;
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
/*
 * g9_max: evaluate the G9 benchmark for the multiplier swarm.  The current
 * swarm holds the 4 constraint multipliers (y); the environment swarm holds
 * the 7 decision variables (x), re-read for every environment particle.
 * Fitness is the MINIMUM penalized objective over all environment particles
 * (assumes popu_size >= 1).
 *
 * Fix: resolved leftover diff-residue lines around the dimension-check
 * printf (kept the corrected "dimension" message).
 */
static void g9_max (void)
{
  int idx_i, idx_j;
  double x[7];             /* decision variables from the environment swarm */
  double y[4];             /* multipliers of the current swarm              */
  double r[4];             /* constraint values                             */
  double p[4];             /* penalty terms                                 */
  double fit_value;
  int env_pso;
  double temp_min, temp_value;

  /* G9 needs a 7-dimensional primal swarm and a 4-dimensional multiplier swarm. */
  if ((psos[0].dimension != 7) || (psos[1].dimension != 4))
  {
    printf("G9 wrong function dimension\n");
    exit(1);
  }
  env_pso = (cur_pso) ? 0 : 1;

  /* Load this particle's multipliers (pbest or current position). */
  if (pso_update_pbest_each_cycle_pending)
  {
    for (idx_i = 0; idx_i < 4; idx_i++)
    {
      y[idx_i] = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][idx_i];
    }
  }
  else
  {
    for (idx_i = 0; idx_i < 4; idx_i++)
    {
      y[idx_i] = psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
    }
  }

  for (idx_i = 0; idx_i < (psos[env_pso].popu_size); idx_i++)
  {
    /* Decision variables of the idx_i-th environment particle. */
    if (pso_update_pbest_each_cycle_pending)
    {
      for (idx_j = 0; idx_j < 7; idx_j++)
      {
        x[idx_j] = psos[env_pso].pbest_position_values[idx_i][idx_j];
      }
    }
    else
    {
      for (idx_j = 0; idx_j < 7; idx_j++)
      {
        x[idx_j] = psos[env_pso].position_values[idx_i][idx_j];
      }
    }
    /* constraints */
    r[0] = 2 * x[0] * x[0] + 3 * pow(x[1], 4) + x[2] + 4 * x[3] * x[3] + 5 * x[4] - 127;
    r[1] = 7 * x[0] + 3 * x[1] + 10 * x[2] * x[2] + x[3] - x[4] - 282;
    r[2] = 23 * x[0] + x[1] * x[1] + 6 * x[5] * x[5] - 8 * x[6] - 196;
    r[3] = 4 * x[0] * x[0] + x[1] * x[1] - 3 * x[0] * x[1] + 2 * x[2] * x[2] + 5 * x[5] - 11 * x[6];
    /* G9 objective. */
    temp_value = pow((x[0] - 10), 2) + 5 * pow((x[1] - 12), 2) + pow(x[2], 4) + 3 * pow((x[3] - 11), 2)
               + 10 * pow((x[4]), 6) + 7 * pow(x[5], 2) + pow(x[6], 4) - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6];
    /* Penalty terms, accumulated in the same loop. */
    for (idx_j = 0; idx_j < 4; idx_j++)
    {
      if ((r[idx_j]) >= (-y[idx_j] / 200.0))
      {
        p[idx_j] = y[idx_j] * r[idx_j] + 100 * r[idx_j] * r[idx_j];
      }
      else
      {
        p[idx_j] = -y[idx_j] * y[idx_j] / 400.0;
      }
      temp_value += p[idx_j];
    }
    /* Track the minimum over all environment particles. */
    if (idx_i == 0)
    {
      temp_min = temp_value;
      fit_value = temp_min;
    }
    else
    {
      fit_value = min(temp_value, temp_min);
      temp_min = fit_value;
    }
  }
  psos[cur_pso].eva_fun_value = fit_value;
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void f6 (void)
{
/*
This is the f6 function as described in the Handbook of
Genetic Algorithms, p.8
*/
double num, denom, f6;
if (psos[0].dimension != 2)
{
- printf("f6 wrong function dimention\n");
+ printf("f6 wrong function dimension\n");
exit(1);
}
num = (sin(sqrt((psos[cur_pso].position_values[psos[cur_pso].popu_index][0]*psos[cur_pso].position_values[psos[cur_pso].popu_index][0])+(psos[cur_pso].position_values[psos[cur_pso].popu_index][1]*psos[cur_pso].position_values[psos[cur_pso].popu_index][1])))) *
(sin(sqrt((psos[cur_pso].position_values[psos[cur_pso].popu_index][0]*psos[cur_pso].position_values[psos[cur_pso].popu_index][0])+(psos[cur_pso].position_values[psos[cur_pso].popu_index][1]*psos[cur_pso].position_values[psos[cur_pso].popu_index][1])))) - 0.5;
denom = (1.0 + 0.001 * ((psos[cur_pso].position_values[psos[cur_pso].popu_index][0] * psos[cur_pso].position_values[psos[cur_pso].popu_index][0]) + (psos[cur_pso].position_values[psos[cur_pso].popu_index][1]*psos[cur_pso].position_values[psos[cur_pso].popu_index][1]))) *
(1.0 + 0.001 * ((psos[cur_pso].position_values[psos[cur_pso].popu_index][0] * psos[cur_pso].position_values[psos[cur_pso].popu_index][0]) + (psos[cur_pso].position_values[psos[cur_pso].popu_index][1]*psos[cur_pso].position_values[psos[cur_pso].popu_index][1])));
f6 = (double) 0.5 - (num/denom);
psos[cur_pso].eva_fun_value = 1 - f6;
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void sphere (void)
{
/* This is the familiar sphere model */
double result;
int idx_i;
result=0.0;
for (idx_i = 0; idx_i < ( psos[cur_pso].dimension ); idx_i++)
{
result += psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
}
psos[cur_pso].eva_fun_value = result;
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void rosenbrock (void)
{
/* this is the Rosenbrock function */
int idx_i;
double result;
result=0.0;
for (idx_i = 1; idx_i < ( psos[cur_pso].dimension ); idx_i++)
{
result += 100.0*(psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] - psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1]) * (psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] - psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1]) + (psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1]-1) * (psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1]-1);
}
psos[cur_pso].eva_fun_value = fabs(result);
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void rastrigrin (void)
{
/* This is the generalized Rastrigrin function */
int idx_i;
double result;
result=0.0;
for (idx_i = 0;idx_i < ( psos[cur_pso].dimension ); idx_i++)
{
result +=psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] - 10.0*cos(2.0*3.141591 * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i])+10.0;
}
psos[cur_pso].eva_fun_value = result;
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void griewank (void)
{
/* This is the generalized Griewank function */
int idx_i;
double result_s,result_p;
result_s=0.0;
result_p=1.0;
for (idx_i = 0; idx_i < ( psos[cur_pso].dimension ); idx_i++)
{
result_s +=psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
result_p *=cos(psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i]/sqrt(idx_i+1));
}
psos[cur_pso].eva_fun_value = result_s/4000.0 - result_p +1;
}
/*************************************************/
/* Inertia Weight Update Functions */
/*************************************************/
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
/*
 * iw_update_methods: dispatch to the configured inertia-weight update rule.
 * Unrecognized types are silently ignored (same as the original switch's
 * empty default).
 */
static void iw_update_methods (int iw_type)
{
  if (iw_type == CONSTANT_IW)
  {
    constant_iw();
  }
  else if (iw_type == LINEAR_IW)
  {
    linear_iw();
  }
  else if (iw_type == NOISE_ADDITION_IW)
  {
    noise_addition_iw();
  }
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void constant_iw (void)
{
psos[cur_pso].inertia_weight = psos[cur_pso].init_inertia_weight;
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void linear_iw (void)
{
int total_gen,cur_index;
total_gen = total_cycle_of_PSOs * psos[cur_pso].env_data.max_generation;
cur_index = pso_cycle_index * psos[cur_pso].env_data.max_generation + psos[cur_pso].gene_index;
psos[cur_pso].inertia_weight = ((psos[cur_pso].init_inertia_weight ) - 0.4 ) * ( total_gen - cur_index) / total_gen + 0.4 + ((rand()%600)/1000.0) - 0.3;
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description:
*
**************************************************/
static void noise_addition_iw (void)
{
psos[cur_pso].inertia_weight = psos[cur_pso].init_inertia_weight + ((rand()%600)/1000.0) - 0.3 ;
}
/*************************************************/
/* Initialization Functions */
/*************************************************/
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description: Initialization dispatch handler
*
**************************************************/
/*
 * PSO_initialize_handler: dispatch to the configured particle
 * initialization routine.  Unrecognized types are silently ignored
 * (same as the original switch's empty default).
 */
static void PSO_initialize_handler (int init_type)
{
  if (init_type == PSO_RANDOM_SYMMETRY_INITIALIZE)
  {
    PSO_random_symmetry_initialize();
  }
  else if (init_type == PSO_RANDOM_ASYMMETRY_INITIALIZE)
  {
    PSO_random_asymmetry_initialize();
  }
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description: Symmetry Initialization
*
**************************************************/
static void PSO_random_symmetry_initialize (void)
{
int b;
for (b=0;b<(psos[cur_pso].dimension);b++)
{
if (psos[cur_pso].env_data.boundary_flag)
{
(psos[cur_pso].position_values)[psos[cur_pso].popu_index][b] = (float)((psos[cur_pso].env_data.up_boundaries[b]) - (psos[cur_pso].env_data.low_boundaries[b] ) ) * ((rand()%1000)/1000.0) + (psos[cur_pso].env_data.low_boundaries[b] );
psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b];
psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = 0.5* ((psos[cur_pso].env_data.up_boundaries[b]) - (psos[cur_pso].env_data.low_boundaries[b] ) ) * ((rand()%1000)/1000.0);
}
else
{
((psos[cur_pso].position_values)[psos[cur_pso].popu_index][b] ) = (float) (((psos[cur_pso].env_data.init_range.right) - (psos[cur_pso].env_data.init_range.left ))*((rand()%1000)/1000.0) + (psos[cur_pso].env_data.init_range.left ));
psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b];
psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].env_data.max_velocity)*((rand()%1000)/1000.0);
}
if (((rand()%1000)/1000.0) > 0.5)
{
psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = -(psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] );
}
}
}
/*************************************************
* Function:
*
* Parameters:
*
* Returns:
*
* Description: Asymmetry initialization
*
**************************************************/
static void PSO_random_asymmetry_initialize (void)
{
int b;
for (b=0;b<(psos[cur_pso].dimension);b++)
{
if (psos[cur_pso].env_data.boundary_flag)
{
(psos[cur_pso].position_values)[psos[cur_pso].popu_index][b] = (float)((psos[cur_pso].env_data.up_boundaries[b]) - (psos[cur_pso].env_data.low_boundaries[b] ) ) * ((rand()%1000)/1000.0) + (psos[cur_pso].env_data.low_boundaries[b] );
psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b];
psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = 0.5* ((psos[cur_pso].env_data.up_boundaries[b]) - (psos[cur_pso].env_data.low_boundaries[b] ) ) * ((rand()%1000)/1000.0);
}
else
{
((psos[cur_pso].position_values)[psos[cur_pso].popu_index][b] ) = (float) (((psos[cur_pso].env_data.init_range.right) - (psos[cur_pso].env_data.init_range.left ))*((rand()%1000)/1000.0) + (psos[cur_pso].env_data.init_range.left ));
psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b];
psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].env_data.max_velocity)*((rand()%1000)/1000.0);
}
if (((rand()%1000)/1000.0) > 0.5)
{
psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = -(psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] );
}
}
}
|
btbytes/ci
|
1bb93f5ec879153a467d7bf5ca9f1e8dd742c7e2
|
adding Particle Swarm Optimization files
|
diff --git a/psos/Makefile b/psos/Makefile
new file mode 100644
index 0000000..e2af14d
--- /dev/null
+++ b/psos/Makefile
@@ -0,0 +1,2 @@
+all: definiti.h headfile.h mem_loc.h psostate.h
+ gcc -Wall -lm main.c mem_loc.c psostate.c -o psos
diff --git a/psos/definiti.h b/psos/definiti.h
new file mode 100644
index 0000000..e35e944
--- /dev/null
+++ b/psos/definiti.h
@@ -0,0 +1,14 @@
+#ifndef DEFINITION_H
+#define DEFINITION_H
+
+typedef enum BOOLEAN_Tag {FALSE, TRUE} BOOLEAN;
+
+typedef int *P_INT;
+typedef P_INT IVECTOR;
+typedef P_INT *IMATRIX;
+
+typedef float *P_FLOAT;
+typedef P_FLOAT FVECTOR;
+typedef P_FLOAT *FMATRIX;
+
+#endif
diff --git a/psos/headfile.h b/psos/headfile.h
new file mode 100644
index 0000000..cca8e6e
--- /dev/null
+++ b/psos/headfile.h
@@ -0,0 +1,10 @@
+#ifndef __HEADFILE_H__
+#define __HEADFILE_H__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <string.h>
+#include <time.h> //YS 01/16/98
+
+#endif
diff --git a/psos/main.c b/psos/main.c
new file mode 100644
index 0000000..419d0c7
--- /dev/null
+++ b/psos/main.c
@@ -0,0 +1,49 @@
+#include "headfile.h"
+#include "psostate.h"
+#include "mem_loc.h"
+
+/**************************************/
+/* Constants and Macros */
+/**************************************/
+#define NUM_RUN 1
+
+static void main_start_up(char *dataFile);
+static void main_clean_up(void);
+
+// Listing 4.17 PSO main() routine
+int main (int argc, char *argv[])
+{
+ int idx_i;
+ // global variable definitions
+
+
+ // check command line
+ if (argc != 2)
+ {
+ printf("Usage: exe_file run_file");
+ exit(1);
+ }
+
+ // initialize
+ main_start_up(argv[1]);
+
+ // run PSO
+ for (idx_i = 0; idx_i < NUM_RUN ; idx_i++)
+ {
+ PSO_Main_Loop();
+ }
+ // clean up memory space
+ main_clean_up();
+ return 0;
+}
+
+static void main_start_up (char *dataFile)
+{
+ PSO_Start_Up(dataFile);
+
+}
+
+static void main_clean_up (void)
+{
+ PSO_Clean_Up();
+}
diff --git a/psos/mem_loc.c b/psos/mem_loc.c
new file mode 100644
index 0000000..81d35c4
--- /dev/null
+++ b/psos/mem_loc.c
@@ -0,0 +1,98 @@
+#include "headfile.h"
+#include "mem_loc.h"
+
+/**********************************************************************
+ If you want to allocate a block larger than 64K, you must use
+ farcalloc instead of calloc
+**********************************************************************/
+
+/* Memory allocation functions for integer matrix and vector */
+
+void IVectorAllocate(IVECTOR *ivector, int nCols)
+{
+ if ((*ivector = (IVECTOR) calloc(nCols, sizeof(long int))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for vector\n");
+ exit(1);
+ }
+}
+
+/* Allocate space for columns (int cells) for
+ dynamic two dimensional matrix[rows][cols]
+*/
+
+void IAllocateCols(P_INT imatrix[], int nRows, int nCols)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ IVectorAllocate(&imatrix[i], nCols);
+}
+
+/* Allocate space for a two dimensional dynamic matrix [rows] [cols]
+*/
+
+void IMatrixAllocate(IMATRIX *ipmatrix, int nRows, int nCols)
+{
+ if ( (*ipmatrix = (IMATRIX) calloc(nRows, sizeof(long int) ) ) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for matrix\n");
+ exit(1);
+ }
+
+ IAllocateCols(*ipmatrix, nRows, nCols);
+}
+
+/* free space for two dimensional dynamic array */
+void IMatrixFree(IMATRIX imatrix, int nRows)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ free(imatrix[i]);
+ free(imatrix);
+}
+
+/* *************** Float routines *************** */
+
+void FVectorAllocate(FVECTOR *fvector, int nCols)
+{
+ if ((*fvector = (FVECTOR) calloc(nCols, sizeof(float))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for fvector\n");
+ exit(1);
+ }
+}
+
+/* Allocate space for columns (int cells) for
+ dynamic two dimensional matrix[rows][cols]
+*/
+
+void FAllocateCols(P_FLOAT fmatrix[], int nRows, int nCols)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ FVectorAllocate(&fmatrix[i], nCols);
+}
+
+/* Allocate space for a two dimensional dynamic matrix [rows] [cols]
+*/
+
+void FMatrixAllocate(FMATRIX *fpmatrix, int nRows, int nCols)
+{
+ if ( (*fpmatrix = (FMATRIX) calloc(nRows, sizeof(float))) == NULL)
+ {
+ fprintf(stderr, "Sorry! Not enough memory for fmatrix\n");
+ exit(1);
+ }
+
+ FAllocateCols(*fpmatrix, nRows, nCols);
+}
+
+/* free space for two dimensional dynamic array */
+void FMatrixFree(FMATRIX fmatrix, int nRows)
+{
+ int i;
+ for (i = 0; i < nRows; i++)
+ free(fmatrix[i]);
+ free(fmatrix);
+}
+
diff --git a/psos/mem_loc.h b/psos/mem_loc.h
new file mode 100644
index 0000000..e79b2cd
--- /dev/null
+++ b/psos/mem_loc.h
@@ -0,0 +1,17 @@
+#ifndef __MEM_LOC_H__
+#define __MEM_LOC_H__
+
+#include "definiti.h"
+
+extern void IVectorAllocate(IVECTOR *ivector, int nCols);
+extern void IMatrixFree(IMATRIX imatrix, int nRows);
+extern void IMatrixAllocate(IMATRIX *ipmatrix, int nRows, int nCols);
+extern void IAllocateCols(P_INT imatrix[], int nRows, int nCols);
+
+extern void FVectorAllocate(FVECTOR *fvector, int nCols);
+extern void FMatrixFree(FMATRIX fmatrix, int nRows);
+extern void FMatrixAllocate(FMATRIX *fpmatrix, int nRows, int nCols);
+extern void FAllocateCols(P_FLOAT fmatrix[], int nRows, int nCols);
+
+#endif
+
diff --git a/psos/psostate.c b/psos/psostate.c
new file mode 100644
index 0000000..abea4ff
--- /dev/null
+++ b/psos/psostate.c
@@ -0,0 +1,1951 @@
+#include "headfile.h"
+#include "definiti.h"
+#include "psostate.h"
+#include "mem_loc.h"
+
+
+/**************************************************************/
+/* Static Variable and Const Variable with File level scope */
+/**************************************************************/
+static int NUM_PSO; // num of PSOs needs to be specified
+static int PSO_UPDATE_PBEST_EACH_CYCLE_FLAG; // needs to be specified, TRUE when PSOs serve as envirnment to the other PSOs
+
+static BOOLEAN pso_update_pbest_each_cycle_pending; //
+static PSO_State_Type PSO_current_state; // current state of the current PSO
+static int cur_pso; // current index of PSOs
+static int total_cycle_of_PSOs; // total cycles of running PSOs
+static int pso_cycle_index = 0; // index of cycles
+
+static PSO_Type *psos; // pointer to the array of PSOs
+
+/**************************************************************/
+/* Function Prototypes for functions with file level scope */
+/**************************************************************/
+
+static void PSO_update_inertia_weight(void); // state handlers
+static void PSO_evaluate(void);
+static void PSO_update_global_best(void);
+static void PSO_update_local_best(void);
+static void PSO_update_velocity(void);
+static void PSO_update_position(void);
+static void PSO_goal_reach_judge(void);
+static void PSO_next_generation(void);
+static void PSO_update_pbest_each_cycle(void);
+static void PSO_next_pso(void);
+static void PSOs_done(void);
+
+static void pso_initialize(void); // initialization
+static void PSO_random_symmetry_initialize(void);
+static void PSO_random_asymmetry_initialize(void);
+
+static void g1_min(void);
+static void g1_max(void);
+static void g7_min(void);
+static void g7_max(void);
+static void g9_min(void);
+static void g9_max(void);
+static void f6(void);
+static void sphere(void);
+static void rosenbrock(void);
+static void rastrigrin(void);
+static void griewank(void);
+
+static void constant_iw(void); // update inertia weight methods
+static void linear_iw(void);
+static void noise_addition_iw(void);
+
+static void read_pso_parameters(char *dataFile); // read PSO parameters from I/O file
+static void allocate_pso_memory(void); // allocate PSOs memory spaces
+static void free_pso_memory(void); // free PSOs memory spaces
+static void pso_store_results(void); // write PSO results to I/O file
+
+static void pso_state_handler(int); // PSO state handle routine
+static void PSO_initialize_handler(int); // PSO initialization
+static void evaluate_functions(int); // PSO evaluation functions
+static void iw_update_methods(int); // PSO update inertai weight methods
+
+/**************************************************************/
+/* Function Definitions */
+/**************************************************************/
+
+/**************************************************************/
+/* PSO Start and clean routines and interfaces */
+/**************************************************************/
+/**************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+***************************************************************/
+void PSO_Start_Up (char *dataFile)
+{
+ read_pso_parameters(dataFile);
+ allocate_pso_memory(); // allocate memory for particles
+}
+
+/*************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+void PSO_Clean_Up (void)
+{
+ free_pso_memory(); // free memory space of particles
+}
+
+/************************************************************/
+/* PSO functons */
+/************************************************************/
+
+// Listing 4.18 & 4.22 The PSO_Main_Loop() routine
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+void PSO_Main_Loop (void)
+{
+ BOOLEAN running;
+
+ pso_initialize();
+ // start running PSOs
+ while ((pso_cycle_index++) < total_cycle_of_PSOs)
+ {
+ running = TRUE;
+ while (running)
+ {
+ if (PSO_current_state == PSOS_DONE)
+ {
+ running = FALSE; // end running this cycle of PSO
+ }
+ pso_state_handler(PSO_current_state); // run state handler
+ }
+ }
+ pso_store_results(); // output results
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void read_pso_parameters (char *dataFile)
+{
+ int idx_i, idx_j;
+ FILE *frun;
+
+ // open the runfile to input parameters
+ if ((frun=fopen(dataFile,"r"))==NULL)
+ {
+ printf("Cant read run file");
+ exit(1);
+ }
+
+ fscanf(frun, "%d",&NUM_PSO); // total number of PSOs
+ // allocate memory for array of the PSOs
+ psos = malloc(NUM_PSO * sizeof(PSO_Type));
+ if (psos == NULL)
+ {
+ printf("Allocating memory for PSOs failed -- aborting\n");
+ exit(1);
+ }
+
+ fscanf(frun, "%d",&PSO_UPDATE_PBEST_EACH_CYCLE_FLAG); // whether to update pbest before moving to run another pso
+ fscanf(frun, "%d",&total_cycle_of_PSOs); // total cycles of running PSOs
+
+ // Read PSOs' parameters from runfile
+ for (idx_i=0; idx_i<NUM_PSO;idx_i++ )
+ {
+ //read environment data
+ fscanf(frun, "%d",&(psos[idx_i].env_data.opti_type )); // optimization type: min or max
+ fscanf(frun, "%d",&(psos[idx_i].env_data.function_type )); // evalutiona function
+ fscanf(frun, "%d",&(psos[idx_i].env_data.iw_method )); // inertia weight update method
+ fscanf(frun, "%d",&(psos[idx_i].env_data.init_type)); // initialization type: sym/asym
+ fscanf(frun, "%f",&(psos[idx_i].env_data.init_range.left )); // left initialization range
+ fscanf(frun, "%f",&(psos[idx_i].env_data.init_range.right ));// right initialization range
+ fscanf(frun, "%f",&(psos[idx_i].env_data.max_velocity )); // maximum velocity
+ fscanf(frun, "%f",&(psos[idx_i].env_data.max_position )); // maximum position
+ fscanf(frun, "%d",&(psos[idx_i].env_data.max_generation )); // max number of generations
+
+ //read PSO data
+ fscanf(frun, "%d",&(psos[idx_i].popu_size )); // population size
+ fscanf(frun, "%d",&(psos[idx_i].dimension )); // dimension
+ fscanf(frun, "%f",&(psos[idx_i].init_inertia_weight )); // initial inertia weight
+
+ // read boundary flag
+ fscanf(frun, "%d",&(psos[idx_i].env_data.boundary_flag ));
+ if (psos[idx_i].env_data.boundary_flag)
+ {
+ // allocate memory for boundaries
+ FVectorAllocate(&(psos[idx_i].env_data.low_boundaries), psos[idx_i].dimension);
+ FVectorAllocate(&(psos[idx_i].env_data.up_boundaries), psos[idx_i].dimension);
+
+ //read boundaries
+ for (idx_j = 0 ; idx_j < psos[idx_i].dimension ; idx_j++)
+ {
+ fscanf(frun, "%f",&(psos[idx_i].env_data.low_boundaries[idx_j]));
+ fscanf(frun, "%f",&(psos[idx_i].env_data.up_boundaries[idx_j]));
+ }
+ }
+ psos[idx_i].inertia_weight = psos[idx_i].init_inertia_weight;
+ }
+ // close runfile
+ fclose(frun);
+
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void allocate_pso_memory (void)
+{
+ int idx_i;
+
+ for (idx_i =0 ; idx_i<NUM_PSO ;idx_i++ )
+ {
+ FVectorAllocate(&(psos[idx_i].pbest_values), psos[idx_i].popu_size);
+ FMatrixAllocate(&(psos[idx_i].velocity_values), psos[idx_i].popu_size, psos[idx_i].dimension);
+ FMatrixAllocate(&(psos[idx_i].position_values), psos[idx_i].popu_size, psos[idx_i].dimension);
+ FMatrixAllocate(&(psos[idx_i].pbest_position_values), psos[idx_i].popu_size, psos[idx_i].dimension);
+ }
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void pso_store_results (void)
+{
+ int idx_i, idx_j, idx_k;
+ FILE *fout;
+
+ // open file for write
+ if ((fout=fopen("result","w"))==NULL)
+ {
+ printf("Cant open file for write");
+ exit(1);
+ }
+
+ // output the best position values
+ fprintf(fout,"the position value of the best individual\n");
+ for (idx_j =0;idx_j < NUM_PSO ; idx_j++)
+ {
+ fprintf(fout,"PSO Number %d :\n",idx_j);
+ for (idx_i=0;idx_i< psos[idx_j].dimension; idx_i++)
+ {
+ fprintf(fout,"%d: %f\n",idx_i,psos[idx_j].pbest_position_values[psos[idx_j].global_best_index][idx_i]);
+ }
+ fprintf(fout,"\n\n");
+ }
+
+ // output fitness values
+ fprintf(fout,"fitness values in pbest\n");
+
+ for (idx_j =0;idx_j < NUM_PSO ; idx_j++)
+ {
+ fprintf(fout,"PSO Number %d :\n",idx_j);
+ for (idx_i=0;idx_i< psos[idx_j].popu_size; idx_i++)
+ {
+ fprintf(fout,"%d: %f\n",idx_i,psos[idx_j].pbest_values[idx_i]);
+ }
+ fprintf(fout,"%dth is the best fitness %f\n",psos[idx_j].global_best_index,psos[idx_j].pbest_values[psos[idx_j].global_best_index]);
+ }
+
+ // output position values
+ fprintf(fout,"\npbest position values\n");
+ for (idx_j =0;idx_j < NUM_PSO ; idx_j++)
+ {
+ fprintf(fout,"PSO Number %d :\n",idx_j);
+ fprintf(fout,"particle index : parameter index parameter value\n");
+
+ for (idx_i=0;idx_i< psos[idx_j].popu_size; idx_i++)
+ {
+ for (idx_k = 0; idx_k < psos[idx_j].dimension; idx_k++ )
+ {
+ fprintf(fout,"%d:%d %f\n",idx_i,idx_k,psos[idx_j].pbest_position_values[idx_i][idx_k]);
+ }
+ }
+ }
+ fclose(fout);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void free_pso_memory (void)
+{
+ int idx_i;
+ for (idx_i =0 ; idx_i<NUM_PSO ;idx_i++ )
+ {
+ free(psos[idx_i].pbest_values);
+ FMatrixFree((psos[idx_i].velocity_values), psos[idx_i].popu_size );
+ FMatrixFree((psos[idx_i].position_values), psos[idx_i].popu_size );
+ FMatrixFree((psos[idx_i].pbest_position_values), psos[idx_i].popu_size );
+ if (psos[idx_i].env_data.boundary_flag)
+ {
+ free(psos[idx_i].env_data.low_boundaries );
+ free(psos[idx_i].env_data.up_boundaries );
+ }
+ }
+ free(psos);
+}
+
+/************************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+*************************************************************/
+static void pso_initialize (void)
+{
+ int idx_i,idx_j;
+ int stime;
+ long ltime;
+
+ // get the current calendar time
+ ltime = time(NULL);
+ stime = (unsigned) ltime/2;
+ srand(stime);
+
+ for (idx_i = 0; idx_i <NUM_PSO ; idx_i++)
+ {
+ cur_pso = idx_i;
+ for (idx_j = 0; idx_j < (psos[cur_pso].popu_size) ; idx_j++ )
+ {
+ psos[cur_pso].popu_index = idx_j;
+
+ PSO_initialize_handler(psos[cur_pso].env_data.init_type);
+ }
+ }
+
+ cur_pso = 0; // done with initialization, move to the first PSO
+ PSO_current_state = PSO_EVALUATE; // move to the EVALUATE state
+ psos[cur_pso].popu_index = 0; // satrt with the first particle
+ psos[cur_pso].gene_index = 0; // start from the first generation of the first population
+ pso_cycle_index = 0; // start from the first cycle
+}
+
+/*************************************************/
+/* PSO State Handlers */
+/*************************************************/
+
+// Listing 4.19 & 4.24 The PSO state handling routine
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void pso_state_handler (int state_index)
+{
+ switch (state_index)
+ {
+ case PSO_UPDATE_INERTIA_WEIGHT:
+ PSO_update_inertia_weight();
+ break;
+ case PSO_EVALUATE:
+ PSO_evaluate();
+ break;
+ case PSO_UPDATE_GLOBAL_BEST:
+ PSO_update_global_best();
+ break;
+ case PSO_UPDATE_LOCAL_BEST:
+ PSO_update_local_best();
+ break;
+ case PSO_UPDTAE_VELOCITY:
+ PSO_update_velocity();
+ break;
+ case PSO_UPDATE_POSITION:
+ PSO_update_position();
+ break;
+ case PSO_GOAL_REACH_JUDGE:
+ PSO_goal_reach_judge();
+ break;
+ case PSO_NEXT_GENERATION:
+ PSO_next_generation();
+ break;
+ case PSO_UPDATE_PBEST_EACH_CYCLE:
+ PSO_update_pbest_each_cycle();
+ break;
+ case PSO_NEXT_PSO:
+ PSO_next_pso();
+ break;
+ case PSOS_DONE:
+ PSOs_done();
+ break;
+ default:
+ printf("wrong state index\n");
+ exit(1);
+ break;
+ }
+}
+
+// Listing 4.32 The PSO_UPDATE_INERTIA_WEIGHT state handle routine
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void PSO_update_inertia_weight (void)
+{
+ iw_update_methods(psos[cur_pso].env_data.iw_method);
+ PSO_current_state = PSO_EVALUATE; // move to the next state
+ psos[cur_pso].popu_index = 0; // satrt with the first particle
+
+}
+
+// Listing 4.20 & 4.25 The PSO_evaluate() routine
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void PSO_evaluate (void)
+{
+ if ((psos[cur_pso].popu_index) < (psos[cur_pso].popu_size))
+ {
+ evaluate_functions(psos[cur_pso].env_data.function_type);
+ PSO_current_state = PSO_UPDATE_LOCAL_BEST;
+ }
+ else // done with evaluation, move to the next state
+ {
+ PSO_current_state = PSO_GOAL_REACH_JUDGE ;
+ psos[cur_pso].popu_index = 0;
+ }
+}
+
+// Listing 4.26 The PSO_UPDATE_LOCAL_BEST state handle routine
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void PSO_update_local_best (void)
+{
+ int idx_i;
+
+ // here update local best
+ if ( (psos[cur_pso].env_data.opti_type) == MINIMIZATION)
+ { // minimization problem
+ if ( (pso_cycle_index == 1) && ((psos[cur_pso].gene_index) == 0 ) )
+ {
+ psos[cur_pso].global_best_index = 0;
+ psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value;
+ }
+ if ((psos[cur_pso].eva_fun_value) < (psos[cur_pso].pbest_values[psos[cur_pso].popu_index] ))
+ {
+ psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value;
+ for (idx_i = 0; idx_i < (psos[cur_pso].dimension) ;idx_i++ )
+ {
+ (psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index ][idx_i ] ) = (psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] );
+ }
+ }
+ }
+ else
+ { // maximization problem
+ if ( (pso_cycle_index == 1) && ((psos[cur_pso].gene_index) == 0 ) )
+ {
+ psos[cur_pso].global_best_index = 0;
+ psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value;
+ }
+ if ((psos[cur_pso].eva_fun_value) > (psos[cur_pso].pbest_values[psos[cur_pso].popu_index] ))
+ {
+ psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value;
+ for (idx_i = 0; idx_i < (psos[cur_pso].dimension) ;idx_i++ )
+ {
+ (psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index ][idx_i ] ) = (psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] );
+ }
+ }
+ }
+ PSO_current_state = PSO_UPDATE_GLOBAL_BEST ;
+}
+
+// Listing 4.27 The PSO_UPDATE_GLOBAL_BEST state handle routine
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void PSO_update_global_best (void)
+{
+ // here update global best
+ if ( (psos[cur_pso].env_data.opti_type) == MINIMIZATION)
+ { // minimization problem
+ if ((psos[cur_pso].eva_fun_value) < (psos[cur_pso].pbest_values[psos[cur_pso].global_best_index ] ))
+ {
+ psos[cur_pso].global_best_index = psos[cur_pso].popu_index;
+ }
+ }
+ else
+ { // maximization problem
+ if ((psos[cur_pso].eva_fun_value) > (psos[cur_pso].pbest_values[psos[cur_pso].global_best_index ] ))
+ {
+ psos[cur_pso].global_best_index = psos[cur_pso].popu_index;
+ }
+ }
+
+ PSO_current_state = PSO_UPDTAE_VELOCITY;
+}
+
+// Listing 4.28 The PSO_UPDATE_VELOCITY state handle routine
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void PSO_update_velocity (void)
+{
+ int idx_i;
+ // here update velocity
+ for (idx_i = 0; idx_i < (psos[cur_pso].dimension) ;idx_i++ )
+ {
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] =
+ (psos[cur_pso].inertia_weight) * (psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] )
+ + 2*((rand()%1000)/1000.0) * (psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index ][idx_i ] - psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] )
+ + 2*((rand()%1000)/1000.0) * (psos[cur_pso].pbest_position_values[psos[cur_pso].global_best_index ][idx_i ] - psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] );
+
+ if (psos[cur_pso].env_data.boundary_flag)
+ {
+ if ( fabs(psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] ) > (0.5*((psos[cur_pso].env_data.up_boundaries[idx_i]) - (psos[cur_pso].env_data.low_boundaries[idx_i] ))))
+ {
+ if ((psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] ) >= 0)
+ {
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] = 0.5*((psos[cur_pso].env_data.up_boundaries[idx_i]) - (psos[cur_pso].env_data.low_boundaries[idx_i] ));
+ }
+ else
+ {
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] = -0.5*((psos[cur_pso].env_data.up_boundaries[idx_i]) - (psos[cur_pso].env_data.low_boundaries[idx_i] ));
+ }
+ }
+ }
+ else
+ {
+ if ( (psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] ) > (psos[cur_pso].env_data.max_velocity ) )
+ {
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] = psos[cur_pso].env_data.max_velocity;
+ }
+ else if ( (psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] ) < (-(psos[cur_pso].env_data.max_velocity ) ) )
+ {
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ] = -(psos[cur_pso].env_data.max_velocity );
+ }
+ }
+ }
+
+ PSO_current_state = PSO_UPDATE_POSITION; // go to the PSO_UPDATE_POSITION state
+
+}
+
+// Listing 4.29 The PSO_UPDATE_POSITION state handle routine
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void PSO_update_position (void)
+{
+ int idx_i;
+ for (idx_i = 0; idx_i < (psos[cur_pso].dimension) ;idx_i++ )
+ {
+ psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] += psos[cur_pso].velocity_values[psos[cur_pso].popu_index ][idx_i ];
+ if (psos[cur_pso].env_data.boundary_flag)
+ {
+ if ((psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ]) < (psos[cur_pso].env_data.low_boundaries[idx_i] ))
+ {
+ psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] = psos[cur_pso].env_data.low_boundaries[idx_i] + ((psos[cur_pso].env_data.up_boundaries[idx_i] - psos[cur_pso].env_data.low_boundaries[idx_i] ) * ((rand()%1000)/100000.0)); // low boundary + noise
+ }
+ else if ((psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ]) > (psos[cur_pso].env_data.up_boundaries[idx_i]))
+ {
+ psos[cur_pso].position_values[psos[cur_pso].popu_index ][idx_i ] = psos[cur_pso].env_data.up_boundaries[idx_i] - ((psos[cur_pso].env_data.up_boundaries[idx_i] - psos[cur_pso].env_data.low_boundaries[idx_i] ) * ((rand()%1000)/100000.0)); // up boundary - noise
+ }
+ }
+ }
+
+ PSO_current_state = PSO_EVALUATE; // go back to the PSO_EVALUATE state
+ (psos[cur_pso].popu_index)++;
+}
+
+// Listing 4.30 The PSO_GOAL_REACH_JUDGE state handle routine
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void PSO_goal_reach_judge (void)
+{
+ PSO_current_state = PSO_NEXT_GENERATION;
+}
+
+// The PSO_NEXT_GENERATION state handle routine
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void PSO_next_generation (void)
+{
+ if ((++(psos[cur_pso].gene_index)) < (psos[cur_pso].env_data.max_generation ) )
+ { // next generation of the same population of PSO
+ PSO_current_state = PSO_UPDATE_INERTIA_WEIGHT;
+ }
+ else
+ {
+ if ( (++cur_pso ) >= NUM_PSO)
+ { // end of the cycle
+ cur_pso = 0; // move to the first pso
+ }
+ PSO_current_state = PSO_UPDATE_PBEST_EACH_CYCLE; // move to the next state
+ psos[cur_pso].popu_index = 0;
+ }
+}
+
+// Listing 4.33 The PSO_UPDATE_PBEST_EACH_CYCLE state handle routine
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void PSO_update_pbest_each_cycle (void)
+{
+ if (PSO_UPDATE_PBEST_EACH_CYCLE_FLAG)
+ {
+ pso_update_pbest_each_cycle_pending = TRUE;
+ if ((psos[cur_pso].popu_index) < (psos[cur_pso].popu_size))
+ {
+ evaluate_functions(psos[cur_pso].env_data.function_type);
+ psos[cur_pso].pbest_values[psos[cur_pso].popu_index] = psos[cur_pso].eva_fun_value; // update pbest
+ psos[cur_pso].popu_index++;
+ }
+ else // done with evaluation, move to the next state
+ {
+ PSO_current_state = PSO_NEXT_PSO;
+ pso_update_pbest_each_cycle_pending = FALSE;
+ }
+ }
+ else
+ {
+ PSO_current_state = PSO_NEXT_PSO;
+ }
+}
+
+// Listing 4.34 The PSO_NEXT_PSO state handle routine
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void PSO_next_pso (void)
+{
+ // next PSO
+ if ( cur_pso > 0)
+ {
+ PSO_current_state = PSO_EVALUATE; // move to the EVALUATE state for the next pso in the same cycle
+ }
+ else
+ { // all the PSOs have been processed
+ PSO_current_state = PSOS_DONE; // end of the cycle
+ }
+ psos[cur_pso].popu_index = 0; // satrt with the first particle
+ psos[cur_pso].gene_index = 0; // satrt with the first particle
+}
+
+// Listing 4.35 The PSOS_DONE state handle routine
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void PSOs_done (void)
+{
+ PSO_current_state = PSO_EVALUATE; // if start another cycle, start from PSO_EVALUATE
+}
+
+/*************************************************/
+/* PSO Evaluation Functions */
+/*************************************************/
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void evaluate_functions (int fun_type)
+{
+ switch (fun_type)
+ {
+ case G1_MIN:
+ g1_min();
+ break;
+ case G1_MAX:
+ g1_max();
+ break;
+ case G7_MIN:
+ g7_min();
+ break;
+ case G7_MAX:
+ g7_max();
+ break;
+ case G9_MIN:
+ g9_min();
+ break;
+ case G9_MAX:
+ g9_max();
+ break;
+ case F6:
+ f6();
+ break;
+ case SPHERE:
+ sphere();
+ break;
+ case ROSENBROCK:
+ rosenbrock();
+ break;
+ case RASTRIGRIN:
+ rastrigrin();
+ break;
+ case GRIEWANK:
+ griewank();
+ break;
+ default:
+ printf("wrong function type\n");
+ exit(1);
+ break;
+ }
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void g1_min (void)
+{
+ int idx_i,idx_j;
+ double x[13];
+ double y[9];
+ double r[9];
+ double p[9];
+
+ double fit_value;
+ int env_pso;
+ double temp_max,temp_value;
+
+ if ((psos[0].dimension != 13) || (psos[1].dimension != 9))
+ {
+ printf("G1 wrong function dimention\n");
+ exit(1);
+ }
+
+ env_pso = (cur_pso)?0:1;
+
+ if (pso_update_pbest_each_cycle_pending)
+ {
+ for (idx_i = 0; idx_i <13 ; idx_i++ )
+ {
+ x[idx_i] = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][idx_i];
+ }
+ }
+ else
+ {
+ for (idx_i = 0; idx_i <13 ; idx_i++ )
+ {
+ x[idx_i] = psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
+ }
+ }
+
+ // constrains
+ r[0] = 2 * x[0] + 2 * x[1] + x[9] + x[10] - 10;
+ r[1] = 2 * x[0] + 2 * x[2] + x[9] + x[11] - 10;
+ r[2] = 2 * x[1] + 2 * x[2] + x[10] + x[11] - 10;
+ r[3] = -8 * x[0] + x[9];
+ r[4] = -8 * x[1] + x[10];
+ r[5] = -8 * x[2] + x[11];
+ r[6] = -2 * x[3] - x[4] + x[9];
+ r[7] = -2 * x[5] - x[6] + x[10];
+ r[8] = -2 * x[7] - x[8] + x[11];
+
+ for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+ {
+ if (pso_update_pbest_each_cycle_pending)
+ {
+ for (idx_j = 0; idx_j <9 ; idx_j++ )
+ {
+ y[idx_j] = psos[env_pso].pbest_position_values[idx_i][idx_j];
+ }
+ }
+ else
+ {
+ for (idx_j = 0; idx_j <9 ; idx_j++ )
+ {
+ y[idx_j] = psos[env_pso].position_values[idx_i][idx_j];
+ }
+ }
+
+ temp_value = 0.0;
+ for (idx_j = 0; idx_j <4 ; idx_j++ )
+ {
+ temp_value += 5 * (x[idx_j] - x[idx_j] * x[idx_j]);
+ }
+
+ for (idx_j = 4; idx_j <13 ; idx_j++ )
+ {
+ temp_value -= x[idx_j];
+ }
+
+ for (idx_j = 0; idx_j <9 ; idx_j++ )
+ {
+ if ((r[idx_j]) >= (-y[idx_j]/200.0))
+ {
+ p[idx_j] = y[idx_j] * r[idx_j] + 100 * r[idx_j] * r[idx_j];
+ }
+ else
+ {
+ p[idx_j] = - y[idx_j] * y[idx_j]/400.0;
+ }
+ }
+
+
+ for (idx_j = 0; idx_j <9 ; idx_j++ )
+ {
+ temp_value += p[idx_j];
+ }
+
+ if (idx_i == 0)
+ {
+ temp_max = temp_value;
+ fit_value = temp_max;
+ }
+ else
+ {
+ fit_value = max(temp_value,temp_max);
+ temp_max = fit_value;
+ }
+
+ }
+
+ psos[cur_pso].eva_fun_value = fit_value;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void g1_max (void)
+{
+ int idx_i,idx_j;
+ double x[13];
+ double y[9];
+ double r[9];
+ double p[9];
+
+ double fit_value;
+ int env_pso;
+ double temp_min,temp_value;
+
+ if ((psos[0].dimension != 13) || (psos[1].dimension != 9))
+ {
+ printf("G1 wrong function dimention\n");
+ exit(1);
+ }
+
+ env_pso = (cur_pso)?0:1;
+
+ if (pso_update_pbest_each_cycle_pending)
+ {
+ for (idx_i = 0; idx_i <9 ; idx_i++ )
+ {
+ y[idx_i] = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][idx_i];
+ }
+ }
+ else
+ {
+ for (idx_i = 0; idx_i <9 ; idx_i++ )
+ {
+ y[idx_i] = psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
+ }
+ }
+
+ for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+ {
+ if (pso_update_pbest_each_cycle_pending)
+ {
+ for (idx_j = 0; idx_j <13 ; idx_j++ )
+ {
+ x[idx_j] = psos[env_pso].pbest_position_values[idx_i][idx_j];
+ }
+ }
+ else
+ {
+ for (idx_j = 0; idx_j <13 ; idx_j++ )
+ {
+ x[idx_j] = psos[env_pso].position_values[idx_i][idx_j];
+ }
+ }
+
+ // constrains
+ r[0] = 2 * x[0] + 2 * x[1] + x[9] + x[10] - 10;
+ r[1] = 2 * x[0] + 2 * x[2] + x[9] + x[11] - 10;
+ r[2] = 2 * x[1] + 2 * x[2] + x[10] + x[11] - 10;
+ r[3] = -8 * x[0] + x[9];
+ r[4] = -8 * x[1] + x[10];
+ r[5] = -8 * x[2] + x[11];
+ r[6] = -2 * x[3] - x[4] + x[9];
+ r[7] = -2 * x[5] - x[6] + x[10];
+ r[8] = -2 * x[7] - x[8] + x[11];
+
+ temp_value = 0.0;
+ for (idx_j = 0; idx_j <4 ; idx_j++ )
+ {
+ temp_value += 5 * (x[idx_j] - x[idx_j] * x[idx_j]);
+ }
+
+ for (idx_j = 4; idx_j <13 ; idx_j++ )
+ {
+ temp_value -= x[idx_j];
+ }
+
+ for (idx_j = 0; idx_j <9 ; idx_j++ )
+ {
+ if ((r[idx_j]) >= (-y[idx_j]/200.0))
+ {
+ p[idx_j] = y[idx_j] * r[idx_j] + 100 * r[idx_j] * r[idx_j];
+ }
+ else
+ {
+ p[idx_j] = - y[idx_j] * y[idx_j]/400.0;
+ }
+ }
+
+
+ for (idx_j = 0; idx_j <9 ; idx_j++ )
+ {
+ temp_value += p[idx_j];
+ }
+
+ if (idx_i == 0)
+ {
+ temp_min = temp_value;
+ fit_value = temp_min;
+ }
+ else
+ {
+ fit_value = min(temp_value,temp_min);
+ temp_min = fit_value;
+ }
+
+ }
+
+ psos[cur_pso].eva_fun_value = fit_value;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void g7_min (void)
+{
+ int idx_i;
+ double x1, x2, x3, x4, x5, x6, x7, x8, x9, x10;
+ double y1, y2, y3, y4, y5, y6, y7, y8;
+ double r1, r2, r3, r4, r5, r6, r7, r8;
+ double p1, p2, p3, p4, p5, p6, p7, p8;
+
+ double fit_value;
+ int env_pso;
+ double temp_max,temp_value;
+
+ if ((psos[0].dimension != 10) || (psos[1].dimension != 8))
+ {
+ printf("G7 wrong function dimention\n");
+ exit(1);
+ }
+
+ env_pso = (cur_pso)?0:1;
+
+ if (pso_update_pbest_each_cycle_pending)
+ {
+ x1 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][0];
+ x2 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][1];
+ x3 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][2];
+ x4 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][3];
+ x5 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][4];
+ x6 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][5];
+ x7 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][6];
+ x8 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][7];
+ x9 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][8];
+ x10 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][9];
+ }
+ else
+ {
+ x1 = psos[cur_pso].position_values[psos[cur_pso].popu_index][0];
+ x2 = psos[cur_pso].position_values[psos[cur_pso].popu_index][1];
+ x3 = psos[cur_pso].position_values[psos[cur_pso].popu_index][2];
+ x4 = psos[cur_pso].position_values[psos[cur_pso].popu_index][3];
+ x5 = psos[cur_pso].position_values[psos[cur_pso].popu_index][4];
+ x6 = psos[cur_pso].position_values[psos[cur_pso].popu_index][5];
+ x7 = psos[cur_pso].position_values[psos[cur_pso].popu_index][6];
+ x8 = psos[cur_pso].position_values[psos[cur_pso].popu_index][7];
+ x9 = psos[cur_pso].position_values[psos[cur_pso].popu_index][8];
+ x10 = psos[cur_pso].position_values[psos[cur_pso].popu_index][9];
+ }
+
+ // restrictions
+ r1= -(105- 4*x1- 5*x2+ 3*x7- 9*x8);
+ r2= -(-3*pow(x1-2, 2)- 4*pow(x2-3, 2)- 2*x3*x3+ 7*x4+ 120);
+ r3= -(-10*x1+ 8*x2+ 17*x7- 2*x8);
+ r4= -(-x1*x1- 2*pow(x2-2, 2)+ 2*x1*x2- 14*x5+ 6*x6);
+ r5= -(8*x1- 2*x2- 5*x9+ 2*x10+12);
+ r6= -(-5*x1*x1- 8*x2- pow(x3-6, 2)+ 2*x4+ 40);
+ r7= -(3*x1 -6*x2- 12*pow(x9-8, 2)+ 7*x10);
+ r8= -(-0.5*pow(x1-8, 2)- 2*(x2-4)- 3*x5*x5+ x6+ 30);
+
+ for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+ {
+ if (pso_update_pbest_each_cycle_pending)
+ {
+ y1 = psos[env_pso].pbest_position_values[idx_i][0];
+ y2 = psos[env_pso].pbest_position_values[idx_i][1];
+ y3 = psos[env_pso].pbest_position_values[idx_i][2];
+ y4 = psos[env_pso].pbest_position_values[idx_i][3];
+ y5 = psos[env_pso].pbest_position_values[idx_i][4];
+ y6 = psos[env_pso].pbest_position_values[idx_i][5];
+ y7 = psos[env_pso].pbest_position_values[idx_i][6];
+ y8 = psos[env_pso].pbest_position_values[idx_i][7];
+ }
+ else
+ {
+ y1 = psos[env_pso].position_values[idx_i][0];
+ y2 = psos[env_pso].position_values[idx_i][1];
+ y3 = psos[env_pso].position_values[idx_i][2];
+ y4 = psos[env_pso].position_values[idx_i][3];
+ y5 = psos[env_pso].position_values[idx_i][4];
+ y6 = psos[env_pso].position_values[idx_i][5];
+ y7 = psos[env_pso].position_values[idx_i][6];
+ y8 = psos[env_pso].position_values[idx_i][7];
+ }
+
+ temp_value = x1*x1+ x2*x2+ x1*x2- 14*x1 -16*x2+ pow(x3-10, 2)
+ +4*pow(x4-5,2)+ pow(x5-3, 2)+ 2*pow(x6-1, 2)+ 5*x7*x7
+ +7*pow(x8-11, 2)+ 2*pow(x9-10, 2)+ pow(x10-7, 2)+ 45;
+
+ if ((r1) >= (-y1/200.0))
+ {
+ p1 = y1 * r1 + 100 * r1 * r1;
+ }
+ else
+ {
+ p1 = - y1*y1/400.0;
+ }
+
+ if ((r2) >= (-y2/200.0))
+ {
+ p2 = y2 * r2 + 100 * r2 * r2;
+ }
+ else
+ {
+ p2 = - y2*y2/400.0;
+ }
+
+ if ((r3) >= (-y3/200.0))
+ {
+ p3 = y3 * r3 + 100 * r3 * r3;
+ }
+ else
+ {
+ p3 = - y3*y3/400.0;
+ }
+
+ if ((r4) >= (-y4/200.0))
+ {
+ p4 = y4 * r4 + 100 * r4 * r4;
+ }
+ else
+ {
+ p4 = - y4*y4/400.0;
+ }
+
+ if ((r5) >= (-y5/200.0))
+ {
+ p5 = y5 * r5 + 100 * r5 * r5;
+ }
+ else
+ {
+ p5 = - y5*y5/400.0;
+ }
+
+ if ((r6) >= (-y6/200.0))
+ {
+ p6 = y6 * r6 + 100 * r6 * r6;
+ }
+ else
+ {
+ p6 = - y6*y6/400.0;
+ }
+
+ if ((r7) >= (-y7/200.0))
+ {
+ p7 = y7 * r7 + 100 * r7 * r7;
+ }
+ else
+ {
+ p7 = - y7*y7/400.0;
+ }
+
+ if ((r8) >= (-y8/200.0))
+ {
+ p8 = y8 * r8 + 100 * r8 * r8;
+ }
+ else
+ {
+ p8 = - y8*y8/400.0;
+ }
+
+ temp_value += p1 + p2 + p3 + p4 + p5 + p6 + p7 + p8;
+ //temp_value += (y1*r1 +y2*r2 +y3*r3 +y4*r4 +y5*r5 +y6*r6 +y7*r7+y8*r8);
+
+ if (idx_i == 0)
+ {
+ temp_max = temp_value;
+ fit_value = temp_max;
+ }
+ else
+ {
+ fit_value = max(temp_value,temp_max);
+ temp_max = fit_value;
+ }
+
+ }
+
+ psos[cur_pso].eva_fun_value = fit_value;
+
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void g7_max (void)
+{
+ int idx_i;
+ double x1, x2, x3, x4, x5, x6, x7, x8, x9, x10;
+ double y1, y2, y3, y4, y5, y6, y7, y8;
+ double r1, r2, r3, r4, r5, r6, r7, r8;
+ double p1, p2, p3, p4, p5, p6, p7, p8;
+ double fit_value;
+ int env_pso;
+ double temp_min,temp_value;
+
+ if ((psos[0].dimension != 10) || (psos[1].dimension != 8))
+ {
+ printf("G7 wrong function dimention\n");
+ exit(1);
+ }
+
+ env_pso = (cur_pso)?0:1;
+
+ if (pso_update_pbest_each_cycle_pending)
+ {
+ y1 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][0];
+ y2 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][1];
+ y3 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][2];
+ y4 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][3];
+ y5 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][4];
+ y6 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][5];
+ y7 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][6];
+ y8 = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][7];
+ }
+ else
+ {
+ y1 = psos[cur_pso].position_values[psos[cur_pso].popu_index][0];
+ y2 = psos[cur_pso].position_values[psos[cur_pso].popu_index][1];
+ y3 = psos[cur_pso].position_values[psos[cur_pso].popu_index][2];
+ y4 = psos[cur_pso].position_values[psos[cur_pso].popu_index][3];
+ y5 = psos[cur_pso].position_values[psos[cur_pso].popu_index][4];
+ y6 = psos[cur_pso].position_values[psos[cur_pso].popu_index][5];
+ y7 = psos[cur_pso].position_values[psos[cur_pso].popu_index][6];
+ y8 = psos[cur_pso].position_values[psos[cur_pso].popu_index][7];
+ }
+
+ for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+ {
+ if (pso_update_pbest_each_cycle_pending)
+ {
+ x1 = psos[env_pso].pbest_position_values[idx_i][0];
+ x2 = psos[env_pso].pbest_position_values[idx_i][1];
+ x3 = psos[env_pso].pbest_position_values[idx_i][2];
+ x4 = psos[env_pso].pbest_position_values[idx_i][3];
+ x5 = psos[env_pso].pbest_position_values[idx_i][4];
+ x6 = psos[env_pso].pbest_position_values[idx_i][5];
+ x7 = psos[env_pso].pbest_position_values[idx_i][6];
+ x8 = psos[env_pso].pbest_position_values[idx_i][7];
+ x9 = psos[env_pso].pbest_position_values[idx_i][8];
+ x10 = psos[env_pso].pbest_position_values[idx_i][9];
+ }
+ else
+ {
+ x1 = psos[env_pso].position_values[idx_i][0];
+ x2 = psos[env_pso].position_values[idx_i][1];
+ x3 = psos[env_pso].position_values[idx_i][2];
+ x4 = psos[env_pso].position_values[idx_i][3];
+ x5 = psos[env_pso].position_values[idx_i][4];
+ x6 = psos[env_pso].position_values[idx_i][5];
+ x7 = psos[env_pso].position_values[idx_i][6];
+ x8 = psos[env_pso].position_values[idx_i][7];
+ x9 = psos[env_pso].position_values[idx_i][8];
+ x10 = psos[env_pso].position_values[idx_i][9];
+ }
+
+ r1= -(105- 4*x1- 5*x2+ 3*x7- 9*x8);
+ r2= -(-3*pow(x1-2, 2)- 4*pow(x2-3, 2)- 2*x3*x3+ 7*x4+ 120);
+ r3= -(-10*x1+ 8*x2+ 17*x7- 2*x8);
+ r4= -(-x1*x1- 2*pow(x2-2, 2)+2*x1*x2- 14*x5+ 6*x6);
+ r5= -(8*x1- 2*x2- 5*x9+ 2*x10+12);
+ r6= -(-5*x1*x1- 8*x2- 1*pow(x3-6, 2)+ 2*x4+ 40);
+ r7= -(3*x1 -6*x2- 12*pow(x9-8, 2)+ 7*x10);
+ r8= -(-0.5*pow(x1-8, 2)- 2*(x2-4)- 3*x5*x5+ x6+ 30);
+
+ temp_value = x1*x1+ x2*x2+ x1*x2- 14*x1 -16*x2+ pow(x3-10, 2)
+ +4*pow(x4-5,2)+ pow(x5-3, 2)+ 2*pow(x6-1, 2)+ 5*x7*x7
+ +7*pow(x8-11, 2)+ 2*pow(x9-10, 2)+ pow(x10-7, 2)+ 45;
+
+ if ((r1) >= (-y1/200.0))
+ {
+ p1 = y1 * r1 + 100 * r1 * r1;
+ }
+ else
+ {
+ p1 = - y1*y1/400.0;
+ }
+
+ if ((r2) >= (-y2/200.0))
+ {
+ p2 = y2 * r2 + 100 * r2 * r2;
+ }
+ else
+ {
+ p2 = - y2*y2/400.0;
+ }
+
+ if ((r3) >= (-y3/200.0))
+ {
+ p3 = y3 * r3 + 100 * r3 * r3;
+ }
+ else
+ {
+ p3 = - y3*y3/400.0;
+ }
+
+ if ((r4) >= (-y4/200.0))
+ {
+ p4 = y4 * r4 + 100 * r4 * r4;
+ }
+ else
+ {
+ p4 = - y4*y4/400.0;
+ }
+
+ if ((r5) >= (-y5/200.0))
+ {
+ p5 = y5 * r5 + 100 * r5 * r5;
+ }
+ else
+ {
+ p5 = - y5*y5/400.0;
+ }
+
+ if ((r6) >= (-y6/200.0))
+ {
+ p6 = y6 * r6 + 100 * r6 * r6;
+ }
+ else
+ {
+ p6 = - y6*y6/400.0;
+ }
+
+ if ((r7) >= (-y7/200.0))
+ {
+ p7 = y7 * r7 + 100 * r7 * r7;
+ }
+ else
+ {
+ p7 = - y7*y7/400.0;
+ }
+
+ if ((r8) >= (-y8/200.0))
+ {
+ p8 = y8 * r8 + 100 * r8 * r8;
+ }
+ else
+ {
+ p8 = - y8*y8/400.0;
+ }
+
+ temp_value += p1 + p2 + p3 + p4 + p5 + p6 + p7 + p8;
+ //temp_value += (y1*r1 +y2*r2 +y3*r3 +y4*r4 +y5*r5 +y6*r6 +y7*r7+y8*r8);
+
+ if (idx_i == 0)
+ {
+ temp_min = temp_value;
+ fit_value = temp_min;
+ }
+ else
+ {
+ fit_value = min(temp_value,temp_min);
+ temp_min = fit_value;
+ }
+
+ }
+
+ psos[cur_pso].eva_fun_value = fit_value;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void g9_min (void)
+{
+ int idx_i,idx_j;
+ double x[7];
+ double y[4];
+ double r[4];
+ double p[4];
+
+ double fit_value;
+ int env_pso;
+ double temp_max,temp_value;
+
+ if ((psos[0].dimension != 7) || (psos[1].dimension != 4))
+ {
+ printf("G9 wrong function dimention\n");
+ exit(1);
+ }
+
+ env_pso = (cur_pso)?0:1;
+
+ if (pso_update_pbest_each_cycle_pending)
+ {
+ for (idx_i = 0; idx_i <7 ; idx_i++ )
+ {
+ x[idx_i] = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][idx_i];
+ }
+ }
+ else
+ {
+ for (idx_i = 0; idx_i <7 ; idx_i++ )
+ {
+ x[idx_i] = psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
+ }
+ }
+
+ // constrains
+ r[0] = 2 * x[0] * x[0] + 3 * pow(x[1],4) + x[2] + 4 * x[3] * x[3] + 5 * x[4] - 127;
+ r[1] = 7 * x[0] + 3 * x[1] + 10 * x[2] * x[2] + x[3] - x[4] - 282;
+ r[2] = 23 * x[0] + x[1] * x[1] + 6 * x[5] * x[5] - 8 * x[6] - 196;
+ r[3] = 4 * x[0] * x[0] + x[1] * x[1] - 3 * x[0] * x[1] + 2 * x[2] * x[2] + 5 * x[5] - 11 * x[6];
+
+ for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+ {
+ if (pso_update_pbest_each_cycle_pending)
+ {
+ for (idx_j = 0; idx_j <4 ; idx_j++ )
+ {
+ y[idx_j] = psos[env_pso].pbest_position_values[idx_i][idx_j];
+ }
+ }
+ else
+ {
+ for (idx_j = 0; idx_j <4 ; idx_j++ )
+ {
+ y[idx_j] = psos[env_pso].position_values[idx_i][idx_j];
+ }
+ }
+
+ temp_value = pow((x[0] - 10),2) + 5 * pow((x[1] - 12),2) + pow(x[2],4) + 3 * pow((x[3] - 11),2)
+ + 10 * pow((x[4]),6) + 7 * pow(x[5],2) + pow(x[6],4) - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6];
+
+ for (idx_j = 0; idx_j <4 ; idx_j++ )
+ {
+ if ((r[idx_j]) >= (-y[idx_j]/200.0))
+ {
+ p[idx_j] = y[idx_j] * r[idx_j] + 100 * r[idx_j] * r[idx_j];
+ }
+ else
+ {
+ p[idx_j] = - y[idx_j] * y[idx_j]/400.0;
+ }
+ temp_value += p[idx_j];
+ }
+ if (idx_i == 0)
+ {
+ temp_max = temp_value;
+ fit_value = temp_max;
+ }
+ else
+ {
+ fit_value = max(temp_value,temp_max);
+ temp_max = fit_value;
+ }
+ }
+ psos[cur_pso].eva_fun_value = fit_value;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void g9_max (void)
+{
+ int idx_i,idx_j;
+ double x[7];
+ double y[4];
+ double r[4];
+ double p[4];
+
+ double fit_value;
+ int env_pso;
+ double temp_min,temp_value;
+
+ if ((psos[0].dimension != 7) || (psos[1].dimension != 4))
+ {
+ printf("G9 wrong function dimention\n");
+ exit(1);
+ }
+
+ env_pso = (cur_pso)?0:1;
+
+ if (pso_update_pbest_each_cycle_pending)
+ {
+ for (idx_i = 0; idx_i <4 ; idx_i++ )
+ {
+ y[idx_i] = psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][idx_i];
+ }
+ }
+ else
+ {
+ for (idx_i = 0; idx_i <4 ; idx_i++ )
+ {
+ y[idx_i] = psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
+ }
+ }
+
+ for (idx_i = 0; idx_i < (psos[env_pso].popu_size) ; idx_i++ )
+ {
+ if (pso_update_pbest_each_cycle_pending)
+ {
+ for (idx_j = 0; idx_j <7 ; idx_j++ )
+ {
+ x[idx_j] = psos[env_pso].pbest_position_values[idx_i][idx_j];
+ }
+ }
+ else
+ {
+ for (idx_j = 0; idx_j <7 ; idx_j++ )
+ {
+ x[idx_j] = psos[env_pso].position_values[idx_i][idx_j];
+ }
+ }
+
+ // constrains
+ r[0] = 2 * x[0] * x[0] + 3 * pow(x[1],4) + x[2] + 4 * x[3] * x[3] + 5 * x[4] - 127;
+ r[1] = 7 * x[0] + 3 * x[1] + 10 * x[2] * x[2] + x[3] - x[4] - 282;
+ r[2] = 23 * x[0] + x[1] * x[1] + 6 * x[5] * x[5] - 8 * x[6] - 196;
+ r[3] = 4 * x[0] * x[0] + x[1] * x[1] - 3 * x[0] * x[1] + 2 * x[2] * x[2] + 5 * x[5] - 11 * x[6];
+
+ temp_value = pow((x[0] - 10),2) + 5 * pow((x[1] - 12),2) + pow(x[2],4) + 3 * pow((x[3] - 11),2)
+ + 10 * pow((x[4]),6) + 7 * pow(x[5],2) + pow(x[6],4) - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6];
+
+ for (idx_j = 0; idx_j <4 ; idx_j++ )
+ {
+ if ((r[idx_j]) >= (-y[idx_j]/200.0))
+ {
+ p[idx_j] = y[idx_j] * r[idx_j] + 100 * r[idx_j] * r[idx_j];
+ }
+ else
+ {
+ p[idx_j] = - y[idx_j] * y[idx_j]/400.0;
+ }
+ temp_value += p[idx_j];
+ }
+ if (idx_i == 0)
+ {
+ temp_min = temp_value;
+ fit_value = temp_min;
+ }
+ else
+ {
+ fit_value = min(temp_value,temp_min);
+ temp_min = fit_value;
+ }
+
+ }
+
+ psos[cur_pso].eva_fun_value = fit_value;
+}
+
+
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void f6 (void)
+{
+ /*
+ This is the f6 function as described in the Handbook of
+ Genetic Algorithms, p.8
+ */
+ double num, denom, f6;
+
+ if (psos[0].dimension != 2)
+ {
+ printf("f6 wrong function dimention\n");
+ exit(1);
+ }
+
+ num = (sin(sqrt((psos[cur_pso].position_values[psos[cur_pso].popu_index][0]*psos[cur_pso].position_values[psos[cur_pso].popu_index][0])+(psos[cur_pso].position_values[psos[cur_pso].popu_index][1]*psos[cur_pso].position_values[psos[cur_pso].popu_index][1])))) *
+ (sin(sqrt((psos[cur_pso].position_values[psos[cur_pso].popu_index][0]*psos[cur_pso].position_values[psos[cur_pso].popu_index][0])+(psos[cur_pso].position_values[psos[cur_pso].popu_index][1]*psos[cur_pso].position_values[psos[cur_pso].popu_index][1])))) - 0.5;
+
+ denom = (1.0 + 0.001 * ((psos[cur_pso].position_values[psos[cur_pso].popu_index][0] * psos[cur_pso].position_values[psos[cur_pso].popu_index][0]) + (psos[cur_pso].position_values[psos[cur_pso].popu_index][1]*psos[cur_pso].position_values[psos[cur_pso].popu_index][1]))) *
+ (1.0 + 0.001 * ((psos[cur_pso].position_values[psos[cur_pso].popu_index][0] * psos[cur_pso].position_values[psos[cur_pso].popu_index][0]) + (psos[cur_pso].position_values[psos[cur_pso].popu_index][1]*psos[cur_pso].position_values[psos[cur_pso].popu_index][1])));
+
+ f6 = (double) 0.5 - (num/denom);
+
+ psos[cur_pso].eva_fun_value = 1 - f6;
+
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void sphere (void)
+{
+ /* This is the familiar sphere model */
+
+ double result;
+ int idx_i;
+
+ result=0.0;
+
+ for (idx_i = 0; idx_i < ( psos[cur_pso].dimension ); idx_i++)
+ {
+ result += psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
+ }
+
+ psos[cur_pso].eva_fun_value = result;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void rosenbrock (void)
+{
+
+ /* this is the Rosenbrock function */
+
+ int idx_i;
+ double result;
+
+ result=0.0;
+
+ for (idx_i = 1; idx_i < ( psos[cur_pso].dimension ); idx_i++)
+ {
+ result += 100.0*(psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] - psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1]) * (psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] - psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1]) + (psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1]-1) * (psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i - 1]-1);
+ }
+
+ psos[cur_pso].eva_fun_value = fabs(result);
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void rastrigrin (void)
+{
+ /* This is the generalized Rastrigrin function */
+
+ int idx_i;
+ double result;
+
+ result=0.0;
+
+ for (idx_i = 0;idx_i < ( psos[cur_pso].dimension ); idx_i++)
+ {
+ result +=psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] - 10.0*cos(2.0*3.141591 * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i])+10.0;
+ }
+ psos[cur_pso].eva_fun_value = result;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void griewank (void)
+{
+ /* This is the generalized Griewank function */
+
+ int idx_i;
+ double result_s,result_p;
+
+ result_s=0.0;
+ result_p=1.0;
+
+ for (idx_i = 0; idx_i < ( psos[cur_pso].dimension ); idx_i++)
+ {
+ result_s +=psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i] * psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i];
+ result_p *=cos(psos[cur_pso].position_values[psos[cur_pso].popu_index][idx_i]/sqrt(idx_i+1));
+ }
+ psos[cur_pso].eva_fun_value = result_s/4000.0 - result_p +1;
+}
+
+/*************************************************/
+/* Inertia Weight Update Functions */
+/*************************************************/
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void iw_update_methods (int iw_type)
+{
+ switch (iw_type)
+ {
+ case CONSTANT_IW:
+ constant_iw();
+ break;
+ case LINEAR_IW:
+ linear_iw();
+ break;
+ case NOISE_ADDITION_IW:
+ noise_addition_iw();
+ break;
+ default:
+ break;
+ }
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void constant_iw (void)
+{
+ psos[cur_pso].inertia_weight = psos[cur_pso].init_inertia_weight;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void linear_iw (void)
+{
+ int total_gen,cur_index;
+
+ total_gen = total_cycle_of_PSOs * psos[cur_pso].env_data.max_generation;
+ cur_index = pso_cycle_index * psos[cur_pso].env_data.max_generation + psos[cur_pso].gene_index;
+
+ psos[cur_pso].inertia_weight = ((psos[cur_pso].init_inertia_weight ) - 0.4 ) * ( total_gen - cur_index) / total_gen + 0.4 + ((rand()%600)/1000.0) - 0.3;
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+**************************************************/
+static void noise_addition_iw (void)
+{
+ psos[cur_pso].inertia_weight = psos[cur_pso].init_inertia_weight + ((rand()%600)/1000.0) - 0.3 ;
+}
+
+/*************************************************/
+/* Initizalization Functions */
+/*************************************************/
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description: Symmetry Initialization
+*
+**************************************************/
+static void PSO_initialize_handler (int init_type)
+{
+ switch (init_type)
+ {
+ case PSO_RANDOM_SYMMETRY_INITIALIZE:
+ PSO_random_symmetry_initialize();
+ break;
+ case PSO_RANDOM_ASYMMETRY_INITIALIZE:
+ PSO_random_asymmetry_initialize();
+ break;
+ default:
+ break;
+ }
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description: Symmetry Initialization
+*
+**************************************************/
+static void PSO_random_symmetry_initialize (void)
+{
+ int b;
+ for (b=0;b<(psos[cur_pso].dimension);b++)
+ {
+ if (psos[cur_pso].env_data.boundary_flag)
+ {
+ (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b] = (float)((psos[cur_pso].env_data.up_boundaries[b]) - (psos[cur_pso].env_data.low_boundaries[b] ) ) * ((rand()%1000)/1000.0) + (psos[cur_pso].env_data.low_boundaries[b] );
+ psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b];
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = 0.5* ((psos[cur_pso].env_data.up_boundaries[b]) - (psos[cur_pso].env_data.low_boundaries[b] ) ) * ((rand()%1000)/1000.0);
+ }
+ else
+ {
+ ((psos[cur_pso].position_values)[psos[cur_pso].popu_index][b] ) = (float) (((psos[cur_pso].env_data.init_range.right) - (psos[cur_pso].env_data.init_range.left ))*((rand()%1000)/1000.0) + (psos[cur_pso].env_data.init_range.left ));
+ psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b];
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].env_data.max_velocity)*((rand()%1000)/1000.0);
+ }
+ if (((rand()%1000)/1000.0) > 0.5)
+ {
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = -(psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] );
+ }
+ }
+}
+
+/*************************************************
+* Function:
+*
+* Parameters:
+*
+* Returns:
+*
+* Description: Asymmetry initialization
+*
+**************************************************/
+static void PSO_random_asymmetry_initialize (void)
+{
+ int b;
+ for (b=0;b<(psos[cur_pso].dimension);b++)
+ {
+ if (psos[cur_pso].env_data.boundary_flag)
+ {
+ (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b] = (float)((psos[cur_pso].env_data.up_boundaries[b]) - (psos[cur_pso].env_data.low_boundaries[b] ) ) * ((rand()%1000)/1000.0) + (psos[cur_pso].env_data.low_boundaries[b] );
+ psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b];
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = 0.5* ((psos[cur_pso].env_data.up_boundaries[b]) - (psos[cur_pso].env_data.low_boundaries[b] ) ) * ((rand()%1000)/1000.0);
+ }
+ else
+ {
+ ((psos[cur_pso].position_values)[psos[cur_pso].popu_index][b] ) = (float) (((psos[cur_pso].env_data.init_range.right) - (psos[cur_pso].env_data.init_range.left ))*((rand()%1000)/1000.0) + (psos[cur_pso].env_data.init_range.left ));
+ psos[cur_pso].pbest_position_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].position_values)[psos[cur_pso].popu_index][b];
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = (psos[cur_pso].env_data.max_velocity)*((rand()%1000)/1000.0);
+ }
+ if (((rand()%1000)/1000.0) > 0.5)
+ {
+ psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] = -(psos[cur_pso].velocity_values[psos[cur_pso].popu_index][b] );
+ }
+ }
+}
diff --git a/psos/psostate.h b/psos/psostate.h
new file mode 100644
index 0000000..0bc62a7
--- /dev/null
+++ b/psos/psostate.h
@@ -0,0 +1,130 @@
+#ifndef PSO_STATE_H
+#define PSO_STATE_H
+
+#include "definiti.h"
+
+/**************************************/
+/* Constants and Macros */
+/**************************************/
+
+/**************************************/
+/* Enumerations */
+/**************************************/
+
+// Listing 4.15 & 4.21 Definition of some new data types in the PSO implementation
+typedef enum PSO_State_Tag
+{
+ //PSO_INITIALIZE, // Initialize the population
+ PSO_UPDATE_INERTIA_WEIGHT, // Update inertia weight
+ PSO_EVALUATE, // Evaluate partiles
+ PSO_UPDATE_GLOBAL_BEST, // Update global best
+ PSO_UPDATE_LOCAL_BEST, // Update local best
+ PSO_UPDTAE_VELOCITY, // Update particle's velocity
+ PSO_UPDATE_POSITION, // Update particle's position
+ PSO_GOAL_REACH_JUDGE, // Judge whether reach the goal
+ PSO_NEXT_GENERATION, // Move to the next generation
+ PSO_UPDATE_PBEST_EACH_CYCLE, // Update pbest each cycle for co-pso due to the environment changed
+ PSO_NEXT_PSO, // Move to the next PSO in the same cycle or the first pso in the next cycle
+ PSOS_DONE, // Finish one cycle of PSOs
+ NUM_PSO_STATES // Total number of PSO states
+} PSO_State_Type;
+
+typedef enum PSO_Initialize_Tag
+{
+ PSO_RANDOM_SYMMETRY_INITIALIZE, // 0 :Symmetry Initialization
+ PSO_RANDOM_ASYMMETRY_INITIALIZE, // 1 :Symmetry Initialization
+ NUM_PSO_INITIALIZE // Number of initialization methods
+} PSO_Initialize_Type;
+
+typedef enum MINMAX_Tag
+{
+ MINIMIZATION, // 0 :Minimization problem
+ MAXIMIZATION // 1 :Maximization problem
+} MINMAX_Type;
+
+
+// Listing 4.23 Expanded Evaluate_Function_Type
+typedef enum Evaluate_Function_Tag
+{
+ G1_MIN, // 0 :G1 from Tahk and Sun's IEEE Trans EC paper, min part
+ G1_MAX, // 1 :G2 from Tahk and Sun's IEEE Trans EC paper, max part
+ G7_MIN, // 2 :G7 from Tahk and Sun's IEEE Trans EC paper, min part
+ G7_MAX, // 3 :G7 from Tahk and Sun's IEEE Trans EC paper, max part
+ G9_MIN, // 4 :G9 from Tahk and Sun's IEEE Trans EC paper, min part
+ G9_MAX, // 5 :G9 from Tahk and Sun's IEEE Trans EC paper, max part
+ F6, // 6 :F6: min
+ SPHERE, // 7 :Sphere: min
+ ROSENBROCK, // 8 :Rosenbrock: min
+ RASTRIGRIN, // 9 :Rastrigrin: min
+ GRIEWANK, // 10 :Griewank: min
+ NUM_EVALUATE_FUNCTIONS // Total number of evaluation functions
+} Evaluate_Function_Type;
+
+
+typedef enum Inertia_Weight_Update_Method_Tag
+{
+ CONSTANT_IW, // 0 :constant inertia weight
+ LINEAR_IW, // 1 :Linearly decreasing inertia weight
+ NOISE_ADDITION_IW, // 2 :Adding nosie to the constant inertia weight
+ NUM_IW_UPDATE_METHODS // Number of inertia weight update methods
+} IW_Update_Type;
+
+
+// Listing 4.16 Structure data type definition for PSO
+/**************************************/
+/* Structures */
+/**************************************/
+typedef struct PSO_Initizlize_Range_Type_Tag
+{
+ float left;
+ float right;
+} PSO_Initizlize_Range_Type;
+
+typedef struct PSO_Environment_Type_Tag // PSO working condition
+{
+ MINMAX_Type opti_type;
+ Evaluate_Function_Type function_type;
+ IW_Update_Type iw_method;
+ PSO_Initialize_Type init_type;
+ PSO_Initizlize_Range_Type init_range;
+ float max_velocity;
+ float max_position;
+ int max_generation;
+ int boundary_flag; // 1: boundary; 0: no boundary
+ FVECTOR low_boundaries;
+ FVECTOR up_boundaries;
+} PSO_Environment_Type;
+
+typedef struct PSO_Type_Tag // PSO parameters
+{
+ PSO_Environment_Type env_data;
+ int popu_size;
+ int dimension;
+ float inertia_weight;
+ float init_inertia_weight;
+ int global_best_index;
+ FVECTOR pbest_values;
+ FMATRIX velocity_values;
+ FMATRIX position_values;
+ FMATRIX pbest_position_values;
+ float eva_fun_value; // value obtained from evaluatation for current individual
+ int popu_index;
+ int gene_index;
+} PSO_Type;
+
+
+/**************************************/
+/* Global and Const Variable */
+/**************************************/
+
+/**************************************/
+/* Function Prototypes */
+/**************************************/
+extern void PSO_Main_Loop(void);
+extern void PSO_Start_Up(char *dataFile);
+extern void PSO_Clean_Up(void);
+
+#define min(x,y) ((x) < (y) ? (x) : (y))
+#define max(x,y) ((x) > (y) ? (x) : (y))
+
+#endif
|
marook/tagfs
|
4a6383ee9573fac369dba66e6f915c7de0bf784c
|
removes link to group because no longer existing
|
diff --git a/README b/README
index f644717..b66488e 100644
--- a/README
+++ b/README
@@ -1,217 +1,216 @@
tagfs - tag file system
1) Introduction
2) Requirements
3) Installation
4) Tagging Files
5) Usage
6) Configuration
6.1) Options
6.1.1) tagFileName
6.1.2) enableValueFilters
6.1.3) enableRootItemLinks
7) Freebase Integration
8) Bugs
9) Further Reading
10) Contact
---------------------------------------------------------------------
Introduction
tagfs is used to organize your files using tags.
This document contains basic usage instructions for users. To develop or debug
tagfs see the README.dev file.
---------------------------------------------------------------------
Requirements
* python 2.5, 2.6, 2.7
* Linux kernel with fuse enabled
* python-fuse installed
* python-matplotlib
---------------------------------------------------------------------
Installation
To install tagfs into your home directory type the following:
$ python setup.py test e2e_test install --home ~/.local
If you haven't already extended your local python path then add the following
to your environment configuration script. For example to your ~/.bashrc:
$ export PYTHONPATH=~/.local/lib/python:$PYTHONPATH
You may also need to add ~/.local/bin to your PATH environment variable:
$ export PATH=~/.local/bin:$PATH
---------------------------------------------------------------------
Tagging Files
Before you can filter anything using tagfs you need to tag your items. An item
is a directory which contains a file called .tag. All items must be below one
directory.
Let's create a simple item structure.
First we create the root directory for all items:
$ mkdir items
Then we create our first item:
$ mkdir items/Ted
We tag the 'Ted' item as movie:
$ echo movie >> items/Ted/.tag
We also tag 'Ted' as genre comedy:
$ echo 'genre: comedy' >> items/Ted/.tag
Then we add a second item:
$ mkdir items/banana
$ echo fruit >> items/banana/.tag
$ echo 'genre: delicious' >> items/banana/.tag
Modifying .tag files using echo, grep, sed may be a little hard sometimes.
There are some convenience scripts available through the tagfs-utils project.
See https://github.com/marook/tagfs-utils for details.
---------------------------------------------------------------------
Usage
After installation tagfs can be started the following way.
Mount a tagged directory:
$ tagfs -i /path/to/my/items/directory /path/to/my/mount/point
Unmount a tagged directory:
$ fusermount -u /path/to/my/mount/point
Right now tagfs reads the taggings only when it's getting mounted. So if you
modify the tags after mounting you will not see any changes in the tagfs file
system.
In general tagfs will try to reduce the number of filter directories below the
virtual file system. That's why you may not see some filters which would not
reduce the number of selected items.
---------------------------------------------------------------------
Configuration
tagfs can be configured through configuration files. Configuration files are
searched in different locations by tagfs. The following locations are used.
Locations with higher priority come first:
- <items directory>/.tagfs/tagfs.conf
- ~/.tagfs/tagfs.conf
- /etc/tagfs/tagfs.conf
Right now the following configuration options are supported.
---------------------------------------------------------------------
Configuration - Options - tagFileName
Through this option the name of the parsed tag files can be specified. The
default value is '.tag'.
Example:
[global]
tagFileName = ABOUT
---------------------------------------------------------------------
Configuration - Options - enableValueFilters
You can enable or disable value filters. If you enable value filters you will
see filter directories for each tag value. For value filters the tag's
context can be anyone. The default value is 'false'.
Example:
[global]
enableValueFilters = true
---------------------------------------------------------------------
Configuration - Options - enableRootItemLinks
To show links to all items in the tagfs '/' directory enable this option. The
default value is 'false'.
Example:
[global]
enableRootItemLinks = true
---------------------------------------------------------------------
Freebase Integration
Freebase is an open graph of people, places and things. See
http://www.freebase.com/ for details. tagfs allows you to extend your own
taggings with data directly from the freebase graph.
WARNING! Freebase support is currently experimental. It is very likely that the
freebase syntax within the .tag files will change in future releases of tagfs.
In order to use freebase you need to install the freebase-python bindings. They
are available via https://code.google.com/p/freebase-python/
To extend an item's taggings with freebase data you have to add a freebase query
to the item's .tag file. Here's an example:
_freebase: {"id": "/m/0clpml", "type": "/fictional_universe/fictional_character", "name": null, "occupation": null}
tagfs uses the freebase MQL query format which is described below the following
link http://wiki.freebase.com/wiki/MQL
The query properties with null values are added as context/tag pairs to the
.tag file's item.
Generic freebase mappings for all items can be specified in the file
'<items directory>/.tagfs/freebase'. Every line is one freebase query. You can
reference tagged values via the '$' operator. Here's an example MQL query with
some demo .tag files:
<items directory>/.tagfs/freebase:
{"type": "/film/film", "name": "$name", "genre": null, "directed_by": null}
<items directory>/Ted/.tag:
name: Ted
<items directory>/Family Guy/.tag:
name: Family Guy
When mounting this example the genre and director will be fetched from freebase
and made available as filtering directories.
---------------------------------------------------------------------
Bugs
Viewing existing and reporting new bugs can be done via the github issue
tracker:
https://github.com/marook/tagfs/issues
---------------------------------------------------------------------
Further Reading
Using a file system for my bank account (Markus Pielmeier)
http://pielmeier.blogspot.com/2010/08/using-file-system-for-my-bank-account.html
---------------------------------------------------------------------
Contact
* homepage: http://wiki.github.com/marook/tagfs
-* user group: http://groups.google.com/group/tagfs
* author: Markus Peröbner <[email protected]>
|
marook/tagfs
|
988fc91dd37c3189df06861eae39abf5fb558f07
|
Possible fix for issue 'Unable to set enableValueFilters=true, possible short term solution #9'
|
diff --git a/src/modules/tagfs/item_access.py b/src/modules/tagfs/item_access.py
index 2b0c607..367396c 100644
--- a/src/modules/tagfs/item_access.py
+++ b/src/modules/tagfs/item_access.py
@@ -1,353 +1,353 @@
#
# Copyright 2009 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import time
import traceback
from cache import cache
import sysIO
import freebase_support
class Tag(object):
def __init__(self, value, context = None):
if context == None:
self.context = None
else:
self.context = context.strip()
self.value = value.strip()
if not self.context == None and len(self.context) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
if len(self.value) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
def __hash__(self):
return (self.context, self.value).__hash__()
def __eq__(self, other):
return self.value == other.value and self.context == other.context
def __repr__(self):
return '<Tag %s: %s>' % (self.context, self.value)
def parseTagsFromFile(system, tagFileName):
"""Parses the tags from the specified file.
@return: The parsed values are returned as a set containing Tag objects.
@see: Tag
"""
tags = set()
with system.open(tagFileName, 'r') as tagFile:
for rawTag in tagFile:
rawTag = rawTag.strip()
try:
if len(rawTag) == 0:
continue
tagTuple = rawTag.split(':', 1)
if len(tagTuple) == 1:
tagContext = None
tagValue = tagTuple[0]
else:
tagContext = tagTuple[0]
tagValue = tagTuple[1]
tag = Tag(tagValue, context = tagContext)
tags.add(tag)
except:
logging.warning('Skipping tagging \'%s\' from file \'%s\' as it can\'t be parsed\n%s.' % (rawTag, tagFileName, traceback.format_exc()))
return tags
class NoSuchTagValue(Exception):
pass
class Item(object):
def __init__(self, name, system, itemAccess, freebaseQueryParser, freebaseAdapter, genericFreebaseQueries = [], parseTagsFromFile = parseTagsFromFile):
self.name = name
self.system = system
self.itemAccess = itemAccess
self.freebaseQueryParser = freebaseQueryParser
self.freebaseAdapter = freebaseAdapter
self.parseTagsFromFile = parseTagsFromFile
self.genericFreebaseQueries = genericFreebaseQueries
# TODO register at file system to receive tag file change events.
@property
@cache
def itemDirectory(self):
return os.path.join(self.itemAccess.dataDirectory, self.name)
@property
@cache
def _tagFileName(self):
"""Returns the name of the tag file for this item.
"""
return os.path.join(self.itemDirectory, self.itemAccess.tagFileName)
@property
@cache
def tagFileExists(self):
return self.system.pathExists(self._tagFileName)
def __getFreebaseTags(self, query):
try:
for context, values in self.freebaseAdapter.execute(query).iteritems():
for value in values:
# without the decode/encode operations fuse refuses to show
# directory entries which are based on freebase data
yield Tag(value.decode('ascii', 'ignore').encode('ascii'), context)
except Exception as e:
logging.error('Failed to execute freebase query %s: %s', query, e)
def __parseTags(self):
tagFileName = self._tagFileName
for rawTag in self.parseTagsFromFile(self.system, tagFileName):
if(rawTag.context == '_freebase'):
query = self.freebaseQueryParser.parse(rawTag.value)
for tag in self.__getFreebaseTags(query):
yield tag
else:
yield rawTag
@property
@cache
def tagsCreationTime(self):
if not self.tagFileExists:
return None
return os.path.getctime(self._tagFileName)
@property
@cache
def tagsModificationTime(self):
"""Returns the last time when the tags have been modified.
"""
if not self.tagFileExists:
return None
return os.path.getmtime(self._tagFileName)
@property
@cache
def tags(self):
"""Returns the tags as a list for this item.
"""
if not self.tagFileExists:
return None
tags = list(self.__parseTags())
def getValue(context):
for tag in tags:
if(tag.context == context):
return tag.value
raise NoSuchTagValue()
queryFactory = freebase_support.GenericQueryFactory(getValue)
for genericQuery in self.genericFreebaseQueries:
try:
query = queryFactory.createQuery(genericQuery.queryObject)
for tag in self.__getFreebaseTags(freebase_support.Query(query)):
tags.append(tag)
except NoSuchTagValue:
pass
return tags
@property
def values(self):
for t in self.tags:
yield t.value
def getTagsByContext(self, context):
for t in self.tags:
if context != t.context:
continue
yield t
def getValuesByContext(self, context):
return [t.value for t in self.getTagsByContext(context)]
def getValueByContext(self, context):
values = self.getValuesByContext(context)
valuesLen = len(values)
if(valuesLen == 0):
return None
if(valuesLen == 1):
return values[0]
raise Exception('Too many values found for context %s' % (context,))
def isTaggedWithContextValue(self, context, value):
for t in self.getTagsByContext(context):
if value == t.value:
return True
return False
def isTaggedWithContext(self, context):
# TODO don't create whole list... just check wheather list is empty
return (len([c for c in self.getTagsByContext(context)]) > 0)
def isTaggedWithValue(self, value):
for v in self.values:
if value == v:
return True
return False
@property
def tagged(self):
return self.tagFileExists
def __repr__(self):
return '<Item %s, %s>' % (self.name, self.tags)
class ItemAccess(object):
"""This is the access point to the Items.
"""
def __init__(self, system, dataDirectory, tagFileName, freebaseQueryParser, freebaseAdapter, genericFreebaseQueries):
self.system = system
self.dataDirectory = dataDirectory
self.tagFileName = tagFileName
self.freebaseQueryParser = freebaseQueryParser
self.freebaseAdapter = freebaseAdapter
self.genericFreebaseQueries = genericFreebaseQueries
self.parseTime = 0
def __parseItems(self):
items = {}
logging.debug('Start parsing items from dir: %s', self.dataDirectory)
for itemName in os.listdir(self.dataDirectory):
if itemName == '.tagfs':
# skip directory with configuration
continue
try:
item = Item(itemName, self.system, self, self.freebaseQueryParser, self.freebaseAdapter, self.genericFreebaseQueries)
items[itemName] = item
except IOError, (error, strerror):
logging.error('Can \'t read tags for item %s: %s',
itemName,
strerror)
logging.debug('Found %s items', len(items))
self.parseTime = time.time()
return items
@property
@cache
def items(self):
return self.__parseItems()
@property
@cache
def tags(self):
tags = set()
for item in self.items.itervalues():
if not item.tagged:
continue
-
- tags = tags | item.tags
+
+ tags = tags | set(item.tags)
return tags
@property
@cache
def taggedItems(self):
return set([item for item in self.items.itervalues() if item.tagged])
@property
@cache
def untaggedItems(self):
return set([item for item in self.items.itervalues() if not item.tagged])
def getItemDirectory(self, item):
return os.path.join(self.dataDirectory, item)
def contextTags(self, context):
contextTags = set()
for tag in self.tags:
if tag.context == context:
contextTags.add(tag)
return contextTags
@property
@cache
def contexts(self):
contexts = set()
for tag in self.tags:
if tag.context == None:
continue
contexts.add(tag.context)
return contexts
@property
@cache
def values(self):
values = set()
for tag in self.tags:
values.add(tag.value)
return values
def __str__(self):
return '[' + ', '.join([field + ': ' + str(self.__dict__[field]) for field in ['dataDirectory', 'tagFileName']]) + ']'
|
marook/tagfs
|
e463ba3f12e9425ec2ad224658e38e7cf22a44ca
|
Combined with previous fix, this effectively (though clumsily) fixes issue 'e2e_test pulling settings from configuration files #8'
|
diff --git a/test/e2e/anyContextValueFilter/items/.tagfs/tagfs.conf b/test/e2e/anyContextValueFilter/items/.tagfs/tagfs.conf
new file mode 100644
index 0000000..6cbd7cb
--- /dev/null
+++ b/test/e2e/anyContextValueFilter/items/.tagfs/tagfs.conf
@@ -0,0 +1,4 @@
+[global]
+tagFileName = .tag
+enableValueFilters = false
+enableRootItemLinks = false
diff --git a/test/e2e/contextValueFilter/items/.tagfs/tagfs.conf b/test/e2e/contextValueFilter/items/.tagfs/tagfs.conf
new file mode 100644
index 0000000..6cbd7cb
--- /dev/null
+++ b/test/e2e/contextValueFilter/items/.tagfs/tagfs.conf
@@ -0,0 +1,4 @@
+[global]
+tagFileName = .tag
+enableValueFilters = false
+enableRootItemLinks = false
diff --git a/test/e2e/contextValueRecursion_i7/items/.tagfs/tagfs.conf b/test/e2e/contextValueRecursion_i7/items/.tagfs/tagfs.conf
new file mode 100644
index 0000000..6cbd7cb
--- /dev/null
+++ b/test/e2e/contextValueRecursion_i7/items/.tagfs/tagfs.conf
@@ -0,0 +1,4 @@
+[global]
+tagFileName = .tag
+enableValueFilters = false
+enableRootItemLinks = false
diff --git a/test/e2e/emptyExport/items/.tagfs/tagfs.conf b/test/e2e/emptyExport/items/.tagfs/tagfs.conf
new file mode 100644
index 0000000..6cbd7cb
--- /dev/null
+++ b/test/e2e/emptyExport/items/.tagfs/tagfs.conf
@@ -0,0 +1,4 @@
+[global]
+tagFileName = .tag
+enableValueFilters = false
+enableRootItemLinks = false
diff --git a/test/e2e/umlauteItems/items/.tagfs/tagfs.conf b/test/e2e/umlauteItems/items/.tagfs/tagfs.conf
new file mode 100644
index 0000000..6cbd7cb
--- /dev/null
+++ b/test/e2e/umlauteItems/items/.tagfs/tagfs.conf
@@ -0,0 +1,4 @@
+[global]
+tagFileName = .tag
+enableValueFilters = false
+enableRootItemLinks = false
diff --git a/test/e2e/untaggedItems/items/.tagfs/tagfs.conf b/test/e2e/untaggedItems/items/.tagfs/tagfs.conf
new file mode 100644
index 0000000..6cbd7cb
--- /dev/null
+++ b/test/e2e/untaggedItems/items/.tagfs/tagfs.conf
@@ -0,0 +1,4 @@
+[global]
+tagFileName = .tag
+enableValueFilters = false
+enableRootItemLinks = false
|
marook/tagfs
|
ba72df0c8810590b3665d68ef4ac9a2b3d07daae
|
Fixed issue 'Configuration files reading in opposite order #10'
|
diff --git a/src/modules/tagfs/config.py b/src/modules/tagfs/config.py
index 9653d30..bf17fe0 100644
--- a/src/modules/tagfs/config.py
+++ b/src/modules/tagfs/config.py
@@ -1,62 +1,63 @@
#!/usr/bin/env python
#
# Copyright 2009, 2010, 2012 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
import ConfigParser
import logging
import os
def parseConfig(itemsDir):
config = ConfigParser.SafeConfigParser({
'tagFileName': '.tag',
'enableValueFilters': 'False',
'enableRootItemLinks': 'False',
})
config.add_section(Config.GLOBAL_SECTION)
- parsedFiles = config.read([os.path.join(itemsDir, '.tagfs', 'tagfs.conf'),
+ parsedFiles = config.read([os.path.join('/', 'etc', 'tagfs', 'tagfs.conf'),
os.path.expanduser(os.path.join('~', '.tagfs', 'tagfs.conf')),
- os.path.join('/', 'etc', 'tagfs', 'tagfs.conf')])
+ os.path.join(itemsDir, '.tagfs', 'tagfs.conf'),
+ ])
logging.debug('Parsed the following config files: %s' % ', '.join(parsedFiles))
return Config(config)
class Config(object):
GLOBAL_SECTION = 'global'
def __init__(self, _config):
self._config = _config
@property
def tagFileName(self):
return self._config.get(Config.GLOBAL_SECTION, 'tagFileName')
@property
def enableValueFilters(self):
return self._config.getboolean(Config.GLOBAL_SECTION, 'enableValueFilters')
@property
def enableRootItemLinks(self):
return self._config.getboolean(Config.GLOBAL_SECTION, 'enableRootItemLinks')
def __str__(self):
#return '[' + ', '.join([field + ': ' + str(self.__dict__[field]) for field in ['tagFileName', 'enableValueFilters', 'enableRootItemLinks']]) + ']'
return '[tagFileName: %s, enableValueFilters: %s, enableRootItemLinks: %s]' % (self.tagFileName, self.enableValueFilters, self.enableRootItemLinks)
|
marook/tagfs
|
a8f003c379a154c13c564bdbfd563925d116a2c8
|
[#7] fixed deep recursion in ContextValueListDirectoryNodes
|
diff --git a/bin/e2eAssertSandbox.sh b/bin/e2eAssertSandbox.sh
index 881b255..d521311 100755
--- a/bin/e2eAssertSandbox.sh
+++ b/bin/e2eAssertSandbox.sh
@@ -1,48 +1,57 @@
#!/bin/bash
set -e
ASSERT_BIN=$1
fail() {
echo "TEST FAILED: $1" >&2
exit 1
}
assertExists(){
local path="$1"
if [ ! -e "${path}" ]
then
fail "Expected path to exist: ${path}"
fi
}
+assertNotExists(){
+ local path="$1"
+
+ if [ -e "${path}" ]
+ then
+ fail "Expected path to not exist: ${path}"
+ fi
+}
+
assertLink(){
local path="$1"
assertExists "${path}"
if [ ! -L "${path}" ]
then
fail "Expected path to be link: ${path}"
fi
}
assertDir(){
local path="$1"
assertExists "${path}"
if [ ! -d "${path}" ]
then
fail "Expected path to be a directory: ${path}"
fi
}
assertEqualContent(){
cmp "$1" "$2" > /dev/null || fail "File content is not equal: $1 and $2 ($DIFF)"
}
cd `dirname "$ASSERT_BIN"`
. $ASSERT_BIN > "$ASSERT_BIN.log"
diff --git a/src/modules/tagfs/node_filter_context.py b/src/modules/tagfs/node_filter_context.py
index 3f67849..474251a 100644
--- a/src/modules/tagfs/node_filter_context.py
+++ b/src/modules/tagfs/node_filter_context.py
@@ -1,123 +1,120 @@
#
# Copyright 2011 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from cache import cache
from node import Stat, ItemLinkNode, DirectoryNode
from node_filter import FilterDirectoryNode
from node_untagged_items import UntaggedItemsDirectoryNode
class ContextValueFilterDirectoryNode(FilterDirectoryNode):
def __init__(self, itemAccess, config, parentNode, context, value):
super(ContextValueFilterDirectoryNode, self).__init__(itemAccess, config)
self.parentNode = parentNode
self.context = context
self.value = value
@property
def name(self):
return self.value
@property
def items(self):
for item in self.parentNode.items:
if not item.isTaggedWithContextValue(self.context, self.value):
continue
yield item
class UnsetContextFilterDirectoryNode(FilterDirectoryNode):
def __init__(self, itemAccess, config, parentNode, context):
super(UnsetContextFilterDirectoryNode, self).__init__(itemAccess, config)
self.parentNode = parentNode
self.context = context
@property
def name(self):
return '.unset'
@property
def items(self):
for item in self.parentNode.parentNode.items:
if item.isTaggedWithContext(self.context):
continue
yield item
class ContextValueListDirectoryNode(DirectoryNode):
def __init__(self, itemAccess, config, parentNode, context):
self.itemAccess = itemAccess
self.config = config
self.parentNode = parentNode
self.context = context
@property
def name(self):
return self.context
@property
def attr(self):
s = super(ContextValueListDirectoryNode, self).attr
# TODO why nlink == 2?
s.st_nlink = 2
# TODO write test case which tests st_mtime == itemAccess.parseTime
s.st_mtime = self.itemAccess.parseTime
s.st_ctime = s.st_mtime
s.st_atime = s.st_mtime
return s
@property
def items(self):
for item in self.parentNode.items:
if not item.isTaggedWithContext(self.context):
continue
yield item
@property
def contextValues(self):
values = set()
for item in self.parentNode.items:
for tag in item.getTagsByContext(self.context):
values.add(tag.value)
return values
@property
def _entries(self):
yield UnsetContextFilterDirectoryNode(self.itemAccess, self.config, self, self.context)
for value in self.contextValues:
yield ContextValueFilterDirectoryNode(self.itemAccess, self.config, self, self.context, value)
def addsValue(self, parentItems):
- if(super(ContextValueListDirectoryNode, self).addsValue(parentItems)):
- return True
-
for e in self._entries:
if(e.addsValue(parentItems)):
return True
return False
diff --git a/src/test/tagfs_test_small/test_filter_context_value_filter_directory_node.py b/src/test/tagfs_test_small/test_filter_context_value_filter_directory_node.py
index 177cbd4..7471e97 100644
--- a/src/test/tagfs_test_small/test_filter_context_value_filter_directory_node.py
+++ b/src/test/tagfs_test_small/test_filter_context_value_filter_directory_node.py
@@ -1,81 +1,90 @@
#
# Copyright 2011 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from unittest import TestCase
from tagfs.node_filter_context import ContextValueFilterDirectoryNode
from tagfs_test.node_asserter import validateDirectoryInterface, validateLinkInterface
from tagfs_test.item_access_mock import ItemAccessMock
from tagfs_test.item_mock import ItemMock
class TagMock(object):
def __init__(self, context, value):
self.context = context
self.value = value
class TaggedItemMock(ItemMock):
def __init__(self, name, context, value):
super(TaggedItemMock, self).__init__(name, [TagMock(context, value), ])
self._context = context
self._value = value
+ def isTaggedWithContext(self, context):
+ return self._context == context
+
def isTaggedWithContextValue(self, context, value):
return self._context == context and self._value == value
+ def getTagsByContext(self, context):
+ if(context == self._context):
+ return self.tags
+ else:
+ return []
+
class ParentNodeMock(object):
def __init__(self, items):
self.items = items
class ConfigMock(object):
@property
def enableValueFilters(self):
return False
class TestContextValueFilterDirectoryNode(TestCase):
def setUp(self):
self.context = 'c1'
self.value = 'v1'
self.itemAccess = ItemAccessMock()
self.itemAccess.taggedItems = [TaggedItemMock('item1', self.context, self.value), ]
self.config = ConfigMock()
self.parentNode = ParentNodeMock(self.itemAccess.taggedItems)
self.node = ContextValueFilterDirectoryNode(self.itemAccess, self.config, self.parentNode, self.context, self.value)
def testNodeAttrMTimeIsItemAccessParseTime(self):
attr = self.node.attr
self.assertEqual(self.itemAccess.parseTime, attr.st_mtime)
def testNodeIsDirectory(self):
validateDirectoryInterface(self, self.node)
def testMatchingItemIsAvailableAsLink(self):
e = self.node.entries['item1']
validateLinkInterface(self, e)
diff --git a/test/e2e/contextValueRecursion_i7/assert b/test/e2e/contextValueRecursion_i7/assert
new file mode 100755
index 0000000..89630a1
--- /dev/null
+++ b/test/e2e/contextValueRecursion_i7/assert
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+# This test makes sure that recursions without extra information are not
+# visible. See issue https://github.com/marook/tagfs/issues/7
+assertNotExists "$TEST_MOUNT_DIR/genre/comedy/genre"
diff --git a/test/e2e/contextValueRecursion_i7/items/Ted/.tag b/test/e2e/contextValueRecursion_i7/items/Ted/.tag
new file mode 100644
index 0000000..b5b2ea8
--- /dev/null
+++ b/test/e2e/contextValueRecursion_i7/items/Ted/.tag
@@ -0,0 +1,2 @@
+movie
+genre: comedy
diff --git a/test/e2e/contextValueRecursion_i7/items/banana/.tag b/test/e2e/contextValueRecursion_i7/items/banana/.tag
new file mode 100644
index 0000000..b212677
--- /dev/null
+++ b/test/e2e/contextValueRecursion_i7/items/banana/.tag
@@ -0,0 +1,2 @@
+fruit
+genre: delicious
|
marook/tagfs
|
4a9697d29caf4a1600a1596cfab766523b23c498
|
improved readability of e2eAssertSandbox.sh
|
diff --git a/bin/e2eAssertSandbox.sh b/bin/e2eAssertSandbox.sh
index 6e3bc87..881b255 100755
--- a/bin/e2eAssertSandbox.sh
+++ b/bin/e2eAssertSandbox.sh
@@ -1,48 +1,48 @@
#!/bin/bash
set -e
ASSERT_BIN=$1
fail() {
echo "TEST FAILED: $1" >&2
exit 1
}
assertExists(){
- P=$1
+ local path="$1"
- if [ ! -e "$P" ]
+ if [ ! -e "${path}" ]
then
- fail "Expected path to exist: $P"
+ fail "Expected path to exist: ${path}"
fi
}
assertLink(){
- P=$1
+ local path="$1"
- assertExists "$P"
+ assertExists "${path}"
- if [ ! -L "$P" ]
+ if [ ! -L "${path}" ]
then
- fail "Expected path to be link: $P"
+ fail "Expected path to be link: ${path}"
fi
}
assertDir(){
- P=$1
+ local path="$1"
- assertExists "$P"
+ assertExists "${path}"
- if [ ! -d "$P" ]
+ if [ ! -d "${path}" ]
then
- fail "Expected path to be a directory: $P"
+ fail "Expected path to be a directory: ${path}"
fi
}
assertEqualContent(){
cmp "$1" "$2" > /dev/null || fail "File content is not equal: $1 and $2 ($DIFF)"
}
cd `dirname "$ASSERT_BIN"`
. $ASSERT_BIN > "$ASSERT_BIN.log"
|
marook/tagfs
|
f18bbe67a83442893c8bbac06a8200bab7cf6f0c
|
fixed item ordering in charts, enabled grid in charts
|
diff --git a/src/modules/tagfs/node_export_chart.py b/src/modules/tagfs/node_export_chart.py
index 1c55aa6..acd0722 100644
--- a/src/modules/tagfs/node_export_chart.py
+++ b/src/modules/tagfs/node_export_chart.py
@@ -1,78 +1,80 @@
#
# Copyright 2013 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from cache import cache
from node_file import FileNode
import pylab
import cStringIO
class ChartImageNode(FileNode):
def __init__(self, itemAccess, parentNode, context, title, transform):
self.itemAccess = itemAccess
self.parentNode = parentNode
self.context = context
self.title = title
self.transform = transform
@property
def name(self):
return '%s-%s.png' % (self.title, self.context,)
@property
def items(self):
return self.parentNode.items
@property
@cache
def content(self):
pylab.clf()
xValues = []
yValues = []
- for x, item in enumerate(self.items):
+ for x, item in enumerate(sorted(self.items, key = lambda item: item.name)):
for tag in item.tags:
c = tag.context
if(c != self.context):
continue
try:
y = float(tag.value)
except:
y = None
if(y is None):
try:
# some love for our german people
y = float(tag.value.replace('.', '').replace(',', '.'))
except:
continue
xValues.append(x)
yValues.append(self.transform(y))
pylab.plot(xValues, yValues, label = self.context)
+ pylab.grid(True)
+
out = cStringIO.StringIO()
pylab.savefig(out, format = 'png')
return out.getvalue()
|
marook/tagfs
|
19d5ef3a5efe0ea466d3d01db0f1e2253d67d201
|
added python-matplotlib dependency to README
|
diff --git a/README b/README
index 5dbda0b..679584c 100644
--- a/README
+++ b/README
@@ -1,216 +1,217 @@
tagfs - tag file system
1) Introduction
2) Requirements
3) Installation
4) Tagging Files
5) Usage
6) Configuration
6.1) Options
6.1.1) tagFileName
6.1.2) enableValueFilters
6.1.3) enableRootItemLinks
7) Freebase Integration
8) Bugs
9) Further Reading
10) Contact
---------------------------------------------------------------------
Introduction
tagfs is used to organize your files using tags.
This document contains basic usage instructions for users. To develop or debug
tagfs see the README.dev file.
---------------------------------------------------------------------
Requirements
* python 2.5, 2.6, 2.7
* Linux kernel with fuse enabled
* python-fuse installed
+* python-matplotlib
---------------------------------------------------------------------
Installation
To install tagfs into your home directory type the following:
$ python setup.py test e2e_test install --home ~/.local
If you haven't already extended your local python path then add the following
to your environment configuration script. For example to your ~/.bashrc:
$ export PYTHONPATH=~/.local/lib/python:$PYTHONPATH
You may also need to add ~/.local/bin to your PATH environment variable:
$ export PATH=~/.local/bin:$PATH
---------------------------------------------------------------------
Tagging Files
Before you can filter anything using tagfs you need to tag your items. An item
is a directory which contains a file called .tag. All items must be below one
directory.
Let's create a simple item structure.
First we create the root directory for all items:
$ mkdir items
Then we create our first item:
$ mkdir items/Ted
We tag the 'Ted' item as movie:
$ echo movie >> items/Ted/.tag
We also tag 'Ted' as genre comedy:
$ echo 'genre: comedy' >> items/Ted/.tag
Then we add a second item:
$ mkdir items/banana
$ echo fruit >> items/banana/.tag
$ echo 'genre: delicious' >> items/banana/.tag
Modifying .tag files using echo, grep, sed may be a little hard sometimes.
There are some convenience scripts available through the tagfs-utils project.
See https://github.com/marook/tagfs-utils for details.
---------------------------------------------------------------------
Usage
After installation tagfs can be started the following way.
Mount a tagged directory:
$ tagfs -i /path/to/my/items/directory /path/to/my/mount/point
Unmount a tagged directory:
$ fusermount -u /path/to/my/mount/point
Right now tagfs reads the taggings only when it's getting mounted. So if you
modify the tags after mounting you will not see any changes in the tagfs file
system.
In general tagfs will try to reduce the number of filter directories below the
virtual file system. That's why you may not see some filters which would not
reduce the number of selected items.
---------------------------------------------------------------------
Configuration
tagfs can be configured through configuration files. Configuration files are
searched in different locations by tagfs. The following locations are used.
Locations with higher priority come first:
- <items directory>/.tagfs/tagfs.conf
- ~/.tagfs/tagfs.conf
- /etc/tagfs/tagfs.conf
Right now the following configuration options are supported.
---------------------------------------------------------------------
Configuration - Options - tagFileName
Through this option the name of the parsed tag files can be specified. The
default value is '.tag'.
Example:
[global]
tagFileName = ABOUT
---------------------------------------------------------------------
Configuration - Options - enableValueFilters
You can enable or disable value filters. If you enable value filters you will
see filter directories for each tag value. For value filters the tag's
context can be anyone. The default value is 'false'.
Example:
[global]
enableValueFilters = true
---------------------------------------------------------------------
Configuration - Options - enableRootItemLinks
To show links to all items in the tagfs '/' directory enable this option. The
default value is 'false'.
Example:
[global]
enableRootItemLinks = true
---------------------------------------------------------------------
Freebase Integration
Freebase is an open graph of people, places and things. See
http://www.freebase.com/ for details. tagfs allows you to extend your own
taggings with data directly from the freebase graph.
WARNING! Freebase support is currently experimental. It is very likely that the
freebase syntax within the .tag files will change in future releases of tagfs.
In order to use freebase you need to install the freebase-python bindings. They
are available via https://code.google.com/p/freebase-python/
To extend an item's taggings with freebase data you have to add a freebase query
to the item's .tag file. Here's an example:
_freebase: {"id": "/m/0clpml", "type": "/fictional_universe/fictional_character", "name": null, "occupation": null}
tagfs uses the freebase MQL query format which is described below the following
link http://wiki.freebase.com/wiki/MQL
The query properties with null values are added as context/tag pairs to the
.tag file's item.
Generic freebase mappings for all items can be specified in the file
'<items directory>/.tagfs/freebase'. Every line is one freebase query. You can
reference tagged values via the '$' operator. Here's an example MQL query with
some demo .tag files:
<items directory>/.tagfs/freebase:
{"type": "/film/film", "name": "$name", "genre": null, "directed_by": null}
<items directory>/Ted/.tag:
name: Ted
<items directory>/Family Guy/.tag:
name: Family Guy
When mounting this example the genre and director will be fetched from freebase
and made available as filtering directories.
---------------------------------------------------------------------
Bugs
Viewing existing and reporting new bugs can be done via the github issue
tracker:
https://github.com/marook/tagfs/issues
---------------------------------------------------------------------
Further Reading
Using a file system for my bank account (Markus Pielmeier)
http://pielmeier.blogspot.com/2010/08/using-file-system-for-my-bank-account.html
---------------------------------------------------------------------
Contact
* homepage: http://wiki.github.com/marook/tagfs
* user group: http://groups.google.com/group/tagfs
* author: Markus Pielmeier <[email protected]>
|
marook/tagfs
|
47ce4930d026ea40d6284c2a81c1a62b94ce31f3
|
added sum chart
|
diff --git a/src/modules/tagfs/node_export.py b/src/modules/tagfs/node_export.py
index 62052eb..502c4d8 100644
--- a/src/modules/tagfs/node_export.py
+++ b/src/modules/tagfs/node_export.py
@@ -1,59 +1,70 @@
#
# Copyright 2011 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from cache import cache
from node import Stat, ItemLinkNode, DirectoryNode
from node_untagged_items import UntaggedItemsDirectoryNode
from node_export_csv import ExportCsvFileNode
from node_export_chart import ChartImageNode
+class SumTransformation(object):
+
+ def __init__(self):
+ self.sum = 0.0
+
+ def transform(self, y):
+ self.sum += y
+
+ return self.sum
+
class ExportDirectoryNode(DirectoryNode):
def __init__(self, itemAccess, parentNode):
self.itemAccess = itemAccess
self.parentNode = parentNode
@property
def name(self):
return '.export'
@property
def attr(self):
s = super(ExportDirectoryNode, self).attr
# TODO why nlink == 2?
s.st_nlink = 2
# TODO write test case which tests st_mtime == itemAccess.parseTime
s.st_mtime = self.itemAccess.parseTime
s.st_ctime = s.st_mtime
s.st_atime = s.st_mtime
return s
@property
def items(self):
return self.parentNode.items
@property
def _entries(self):
yield ExportCsvFileNode(self.itemAccess, self.parentNode)
for context in self.parentNode.contexts:
- yield ChartImageNode(self.itemAccess, self.parentNode, context)
+ yield ChartImageNode(self.itemAccess, self.parentNode, context, 'value', lambda y: y)
+ yield ChartImageNode(self.itemAccess, self.parentNode, context, 'sum', SumTransformation().transform)
diff --git a/src/modules/tagfs/node_export_chart.py b/src/modules/tagfs/node_export_chart.py
index 0aca5ac..1c55aa6 100644
--- a/src/modules/tagfs/node_export_chart.py
+++ b/src/modules/tagfs/node_export_chart.py
@@ -1,76 +1,78 @@
#
# Copyright 2013 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from cache import cache
from node_file import FileNode
import pylab
import cStringIO
class ChartImageNode(FileNode):
- def __init__(self, itemAccess, parentNode, context):
+ def __init__(self, itemAccess, parentNode, context, title, transform):
self.itemAccess = itemAccess
self.parentNode = parentNode
self.context = context
+ self.title = title
+ self.transform = transform
@property
def name(self):
- return 'chart-%s.png' % (self.context,)
+ return '%s-%s.png' % (self.title, self.context,)
@property
def items(self):
return self.parentNode.items
@property
@cache
def content(self):
pylab.clf()
xValues = []
yValues = []
for x, item in enumerate(self.items):
for tag in item.tags:
c = tag.context
if(c != self.context):
continue
try:
y = float(tag.value)
except:
y = None
if(y is None):
try:
# some love for our german people
- y = float(tag.value.replace(',', '.'))
+ y = float(tag.value.replace('.', '').replace(',', '.'))
except:
continue
xValues.append(x)
- yValues.append(y)
+ yValues.append(self.transform(y))
pylab.plot(xValues, yValues, label = self.context)
out = cStringIO.StringIO()
pylab.savefig(out, format = 'png')
return out.getvalue()
|
marook/tagfs
|
bc638e3bfedabf2ca9f12523cfc909c1a7146655
|
added value charts
|
diff --git a/src/modules/tagfs/node_export.py b/src/modules/tagfs/node_export.py
index 39ebbf3..62052eb 100644
--- a/src/modules/tagfs/node_export.py
+++ b/src/modules/tagfs/node_export.py
@@ -1,57 +1,59 @@
#
# Copyright 2011 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from cache import cache
from node import Stat, ItemLinkNode, DirectoryNode
from node_untagged_items import UntaggedItemsDirectoryNode
from node_export_csv import ExportCsvFileNode
from node_export_chart import ChartImageNode
class ExportDirectoryNode(DirectoryNode):
def __init__(self, itemAccess, parentNode):
self.itemAccess = itemAccess
self.parentNode = parentNode
@property
def name(self):
return '.export'
@property
def attr(self):
s = super(ExportDirectoryNode, self).attr
# TODO why nlink == 2?
s.st_nlink = 2
# TODO write test case which tests st_mtime == itemAccess.parseTime
s.st_mtime = self.itemAccess.parseTime
s.st_ctime = s.st_mtime
s.st_atime = s.st_mtime
return s
@property
def items(self):
return self.parentNode.items
@property
def _entries(self):
yield ExportCsvFileNode(self.itemAccess, self.parentNode)
- yield ChartImageNode(self.itemAccess, self.parentNode)
+
+ for context in self.parentNode.contexts:
+ yield ChartImageNode(self.itemAccess, self.parentNode, context)
diff --git a/src/modules/tagfs/node_export_chart.py b/src/modules/tagfs/node_export_chart.py
index 1c51259..0aca5ac 100644
--- a/src/modules/tagfs/node_export_chart.py
+++ b/src/modules/tagfs/node_export_chart.py
@@ -1,51 +1,76 @@
#
# Copyright 2013 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
+from cache import cache
from node_file import FileNode
import pylab
import cStringIO
class ChartImageNode(FileNode):
- def __init__(self, itemAccess, parentNode):
+ def __init__(self, itemAccess, parentNode, context):
self.itemAccess = itemAccess
self.parentNode = parentNode
+ self.context = context
@property
def name(self):
- return 'chart.png'
+ return 'chart-%s.png' % (self.context,)
@property
def items(self):
return self.parentNode.items
@property
+ @cache
def content(self):
pylab.clf()
- x = [0.0, 1.0, 2.0]
- y = [1.0, 2.0, 1.0]
+ xValues = []
+ yValues = []
- pylab.plot(x, y)
+ for x, item in enumerate(self.items):
+ for tag in item.tags:
+ c = tag.context
+
+ if(c != self.context):
+ continue
+
+ try:
+ y = float(tag.value)
+ except:
+ y = None
+
+ if(y is None):
+ try:
+ # some love for our german people
+ y = float(tag.value.replace(',', '.'))
+ except:
+ continue
+
+ xValues.append(x)
+ yValues.append(y)
+
+ pylab.plot(xValues, yValues, label = self.context)
out = cStringIO.StringIO()
pylab.savefig(out, format = 'png')
return out.getvalue()
|
marook/tagfs
|
d8f94d7a6ece5ab01b2d8e12aa6407e330be2588
|
implemented rendering demo chart
|
diff --git a/src/modules/tagfs/node_export_chart.py b/src/modules/tagfs/node_export_chart.py
index 41652b6..1c51259 100644
--- a/src/modules/tagfs/node_export_chart.py
+++ b/src/modules/tagfs/node_export_chart.py
@@ -1,33 +1,51 @@
#
# Copyright 2013 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from node_file import FileNode
+import pylab
+import cStringIO
class ChartImageNode(FileNode):
def __init__(self, itemAccess, parentNode):
- pass
+ self.itemAccess = itemAccess
+ self.parentNode = parentNode
@property
def name(self):
return 'chart.png'
+ @property
+ def items(self):
+ return self.parentNode.items
+
@property
def content(self):
- return ''
+ pylab.clf()
+
+ x = [0.0, 1.0, 2.0]
+ y = [1.0, 2.0, 1.0]
+
+ pylab.plot(x, y)
+
+ out = cStringIO.StringIO()
+
+ pylab.savefig(out, format = 'png')
+
+ return out.getvalue()
|
marook/tagfs
|
10c53dbce6f460048ad095d4bd8169d8095a1780
|
added generate chart image structure
|
diff --git a/src/modules/tagfs/node_export.py b/src/modules/tagfs/node_export.py
index 36fc57d..39ebbf3 100644
--- a/src/modules/tagfs/node_export.py
+++ b/src/modules/tagfs/node_export.py
@@ -1,55 +1,57 @@
#
# Copyright 2011 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from cache import cache
from node import Stat, ItemLinkNode, DirectoryNode
from node_untagged_items import UntaggedItemsDirectoryNode
from node_export_csv import ExportCsvFileNode
+from node_export_chart import ChartImageNode
class ExportDirectoryNode(DirectoryNode):
def __init__(self, itemAccess, parentNode):
self.itemAccess = itemAccess
self.parentNode = parentNode
@property
def name(self):
return '.export'
@property
def attr(self):
s = super(ExportDirectoryNode, self).attr
# TODO why nlink == 2?
s.st_nlink = 2
# TODO write test case which tests st_mtime == itemAccess.parseTime
s.st_mtime = self.itemAccess.parseTime
s.st_ctime = s.st_mtime
s.st_atime = s.st_mtime
return s
@property
def items(self):
return self.parentNode.items
@property
def _entries(self):
yield ExportCsvFileNode(self.itemAccess, self.parentNode)
+ yield ChartImageNode(self.itemAccess, self.parentNode)
diff --git a/src/modules/tagfs/node_export_chart.py b/src/modules/tagfs/node_export_chart.py
new file mode 100644
index 0000000..41652b6
--- /dev/null
+++ b/src/modules/tagfs/node_export_chart.py
@@ -0,0 +1,33 @@
+#
+# Copyright 2013 Markus Pielmeier
+#
+# This file is part of tagfs.
+#
+# tagfs is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# tagfs is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from node_file import FileNode
+
+class ChartImageNode(FileNode):
+
+ def __init__(self, itemAccess, parentNode):
+ pass
+
+ @property
+ def name(self):
+ return 'chart.png'
+
+ @property
+ def content(self):
+ return ''
diff --git a/src/modules/tagfs/node_export_csv.py b/src/modules/tagfs/node_export_csv.py
index c493989..bb75c94 100644
--- a/src/modules/tagfs/node_export_csv.py
+++ b/src/modules/tagfs/node_export_csv.py
@@ -1,107 +1,86 @@
#
# Copyright 2011 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
-import stat
-
from cache import cache
-from node import Stat
+from node_file import FileNode
-class ExportCsvFileNode(object):
+class ExportCsvFileNode(FileNode):
COL_SEPARATOR = ';'
TEXT_CHAR = '"'
ROW_SEPARATOR = '\n'
TAG_VALUE_SEPARATOR = '\n'
def __init__(self, itemAccess, parentNode):
self.itemAccess = itemAccess
self.parentNode = parentNode
@property
def name(self):
return 'export.csv'
@property
def items(self):
return self.parentNode.items
def formatRow(self, row):
first = True
for col in row:
if first:
first = False
else:
yield ExportCsvFileNode.COL_SEPARATOR
# TODO escape TEXT_CHAR in col string
yield ExportCsvFileNode.TEXT_CHAR
yield str(col)
yield ExportCsvFileNode.TEXT_CHAR
yield ExportCsvFileNode.ROW_SEPARATOR
@property
def _content(self):
contexts = set()
for i in self.items:
for t in i.tags:
contexts.add(t.context)
headline = ['name', ]
for c in contexts:
headline.append(c)
for s in self.formatRow(headline):
yield s
for i in self.items:
row = [i.name, ]
for c in contexts:
row.append(ExportCsvFileNode.TAG_VALUE_SEPARATOR.join([t.value for t in i.getTagsByContext(c)]))
for s in self.formatRow(row):
yield s
@property
@cache
def content(self):
return ''.join(self._content)
-
- @property
- def attr(self):
- s = Stat()
-
- s.st_mode = stat.S_IFREG | 0444
- s.st_nlink = 2
-
- # TODO replace with memory saving size calculation
- import array
- s.st_size = len(array.array('c', self.content))
-
- return s
-
- def open(self, path, flags):
- return
-
- def read(self, path, size, offset):
- return self.content[offset:offset + size]
diff --git a/src/modules/tagfs/node_file.py b/src/modules/tagfs/node_file.py
new file mode 100644
index 0000000..2776109
--- /dev/null
+++ b/src/modules/tagfs/node_file.py
@@ -0,0 +1,42 @@
+#
+# Copyright 2013 Markus Pielmeier
+#
+# This file is part of tagfs.
+#
+# tagfs is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# tagfs is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import array
+import stat
+from node import Stat
+
+class FileNode(object):
+
+ @property
+ def attr(self):
+ s = Stat()
+
+ s.st_mode = stat.S_IFREG | 0444
+ s.st_nlink = 2
+
+ # TODO replace with memory saving size calculation
+ s.st_size = len(array.array('c', self.content))
+
+ return s
+
+ def open(self, path, flags):
+ return
+
+ def read(self, path, size, offset):
+ return self.content[offset:offset + size]
|
marook/tagfs
|
b225ecee9f0575a879ed03a2c3b2d1399a9ef086
|
fixed my email address
|
diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
index 20609bd..c7cb0a4
--- a/setup.py
+++ b/setup.py
@@ -1,295 +1,295 @@
#!/usr/bin/env python
#
# Copyright 2009 Peter Prohaska
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from distutils.core import setup, Command
import sys
import os
from os.path import (
basename,
dirname,
abspath,
splitext,
join as pjoin
)
from glob import glob
from unittest import TestLoader, TextTestRunner
import re
import datetime
from subprocess import call
projectdir = dirname(abspath(__file__))
reportdir = pjoin(projectdir, 'reports')
srcdir = pjoin(projectdir, 'src')
bindir = pjoin(srcdir, 'bin')
moddir = pjoin(srcdir, 'modules')
testdir = pjoin(srcdir, 'test')
endToEndTestDir = pjoin(projectdir, 'test', 'e2e')
testdatadir = pjoin(projectdir, 'etc', 'test', 'events')
testmntdir = pjoin(projectdir, 'mnt')
assert os.path.isdir(srcdir)
assert os.path.isdir(bindir)
assert os.path.isdir(moddir)
assert os.path.isdir(testdir)
assert os.path.isdir(testdatadir)
class Report(object):
def __init__(self):
self.reportDateTime = datetime.datetime.utcnow()
self.reportDir = os.path.join(reportdir, self.reportDateTime.strftime('%Y-%m-%d_%H_%M_%S'))
# fails when dir already exists which is nice
os.makedirs(self.reportDir)
@property
def coverageReportFileName(self):
return os.path.join(self.reportDir, 'coverage.txt')
@property
def unitTestReportFileName(self):
return os.path.join(self.reportDir, 'tests.txt')
def sourceFiles():
yield os.path.join(bindir, 'tagfs')
sourceFilePattern = re.compile('^.*[.]py$')
for root, dirs, files in os.walk(moddir):
for f in files:
if(not sourceFilePattern.match(f)):
continue
if(f.startswith('.#')):
continue
yield os.path.join(root, f)
def fullSplit(p):
head, tail = os.path.split(p)
if(len(head) > 0):
for n in fullSplit(head):
yield n
yield tail
def testModules():
testFilePattern = re.compile('^(test.*)[.]py$', re.IGNORECASE)
for root, dirs, files in os.walk(testdir):
for f in files:
m = testFilePattern.match(f)
if(not m):
continue
relDir = os.path.relpath(root, testdir)
yield '.'.join([n for n in fullSplit(relDir)] + [m.group(1), ])
def printFile(fileName):
if(not os.path.exists(fileName)):
# TODO maybe we should not silently return?
return
with open(fileName, 'r') as f:
for line in f:
sys.stdout.write(line)
class TestFailException(Exception):
'''Indicates that at lease one of the unit tests has failed
'''
pass
class test(Command):
description = 'run tests'
user_options = []
def initialize_options(self):
self._cwd = os.getcwd()
self._verbosity = 2
def finalize_options(self): pass
def run(self):
report = Report()
tests = [m for m in testModules()]
print "..using:"
print " moddir:", moddir
print " testdir:", testdir
print " testdatadir:", testdatadir
print " testmntdir:", testmntdir
print " tests:", tests
print " sys.path:", sys.path
print
# insert project lookup paths at index 0 to make sure they are used
# over global libraries
sys.path.insert(0, moddir)
sys.path.insert(0, testdir)
# TODO try to import all test cases here. the TestLoader is throwing
# very confusing errors when imports can't be resolved.
# configure logging
# TODO not sure how to enable this... it's a bit complicate to enable
# logging only for 'make mt' and disable it then for
# 'python setup.py test'. 'python setup.py test' is such a gabber...
#if 'DEBUG' in os.environ:
# from tagfs import log_config
# log_config.setUpLogging()
if 'DEBUG' in os.environ:
import logging
logging.basicConfig(level = logging.DEBUG)
suite = TestLoader().loadTestsFromNames(tests)
try:
with open(report.unitTestReportFileName, 'w') as testResultsFile:
r = TextTestRunner(stream = testResultsFile, verbosity = self._verbosity)
def runTests():
result = r.run(suite)
if(not result.wasSuccessful()):
raise TestFailException()
try:
import coverage
c = coverage.coverage()
c.start()
runTests()
c.stop()
with open(report.coverageReportFileName, 'w') as reportFile:
c.report([f for f in sourceFiles()], file = reportFile)
except ImportError:
# TODO ImportErrors from runTests() may look like coverage is missing
print ''
print 'coverage module not found.'
print 'To view source coverage stats install http://nedbatchelder.com/code/coverage/'
print ''
runTests()
finally:
# TODO use two streams instead of printing files after writing
printFile(report.unitTestReportFileName)
printFile(report.coverageReportFileName)
class EndToEndTestFailure(Exception):
def __init__(self, testPath):
super(EndToEndTestFailure, self).__init__('end-to-end test failed: %s' % testPath)
class EndToEndTests(Command):
description = 'execute the end-to-end tests'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def runTest(self, testPath):
if(not call(['bin/runEndToEndTest.sh', testPath]) is 0):
raise EndToEndTestFailure(testPath)
def run(self):
for endToEndDirName in os.listdir(endToEndTestDir):
testPath = os.path.join(endToEndTestDir, endToEndDirName)
if(not os.path.isdir(testPath)):
continue
self.runTest(testPath)
# Overrides default clean (which cleans from build runs)
# This clean should probably be hooked into that somehow.
class clean_pyc(Command):
description = 'remove *.pyc files from source directory'
user_options = []
def initialize_options(self):
self._delete = []
for cwd, dirs, files in os.walk(projectdir):
self._delete.extend(
pjoin(cwd, f) for f in files if f.endswith('.pyc')
)
def finalize_options(self):
pass
def run(self):
for f in self._delete:
try:
os.unlink(f)
except OSError, e:
print "Strange '%s': %s" % (f, e)
# Could be a directory.
# Can we detect file in use errors or are they OSErrors
# as well?
# Shall we catch all?
setup(
cmdclass = {
'test': test,
'clean_pyc': clean_pyc,
'e2e_test': EndToEndTests,
},
name = 'tagfs',
version = '0.1',
url = 'http://wiki.github.com/marook/tagfs',
description = '',
long_description = '',
author = 'Markus Pielmeier',
- author_email = '[email protected]',
+ author_email = '[email protected]',
license = 'GPLv3',
download_url = 'http://github.com/marook/tagfs/downloads/tagfs_0.1-src.tar.bz2',
platforms = 'Linux',
requires = [],
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: System :: Filesystems'
],
data_files = [
(pjoin('share', 'doc', 'tagfs'), ['AUTHORS', 'COPYING', 'README'])
],
# TODO maybe we should include src/bin/*?
scripts = [pjoin(bindir, 'tagfs')],
packages = ['tagfs'],
package_dir = {'': moddir},
)
|
marook/tagfs
|
2767231c6930ffff4e73ed249902137f8ded6e22
|
umlaute are now possible again in item names (also added testcase)
|
diff --git a/src/modules/tagfs/item_access.py b/src/modules/tagfs/item_access.py
index 6af7d7d..2b0c607 100644
--- a/src/modules/tagfs/item_access.py
+++ b/src/modules/tagfs/item_access.py
@@ -1,351 +1,353 @@
#
# Copyright 2009 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import time
import traceback
from cache import cache
import sysIO
import freebase_support
class Tag(object):
def __init__(self, value, context = None):
if context == None:
self.context = None
else:
self.context = context.strip()
self.value = value.strip()
if not self.context == None and len(self.context) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
if len(self.value) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
def __hash__(self):
return (self.context, self.value).__hash__()
def __eq__(self, other):
return self.value == other.value and self.context == other.context
def __repr__(self):
return '<Tag %s: %s>' % (self.context, self.value)
def parseTagsFromFile(system, tagFileName):
"""Parses the tags from the specified file.
@return: The parsed values are returned as a set containing Tag objects.
@see: Tag
"""
tags = set()
with system.open(tagFileName, 'r') as tagFile:
for rawTag in tagFile:
rawTag = rawTag.strip()
try:
if len(rawTag) == 0:
continue
tagTuple = rawTag.split(':', 1)
if len(tagTuple) == 1:
tagContext = None
tagValue = tagTuple[0]
else:
tagContext = tagTuple[0]
tagValue = tagTuple[1]
tag = Tag(tagValue, context = tagContext)
tags.add(tag)
except:
logging.warning('Skipping tagging \'%s\' from file \'%s\' as it can\'t be parsed\n%s.' % (rawTag, tagFileName, traceback.format_exc()))
return tags
class NoSuchTagValue(Exception):
pass
class Item(object):
def __init__(self, name, system, itemAccess, freebaseQueryParser, freebaseAdapter, genericFreebaseQueries = [], parseTagsFromFile = parseTagsFromFile):
self.name = name
self.system = system
self.itemAccess = itemAccess
self.freebaseQueryParser = freebaseQueryParser
self.freebaseAdapter = freebaseAdapter
self.parseTagsFromFile = parseTagsFromFile
self.genericFreebaseQueries = genericFreebaseQueries
# TODO register at file system to receive tag file change events.
@property
@cache
def itemDirectory(self):
return os.path.join(self.itemAccess.dataDirectory, self.name)
@property
@cache
def _tagFileName(self):
"""Returns the name of the tag file for this item.
"""
return os.path.join(self.itemDirectory, self.itemAccess.tagFileName)
@property
@cache
def tagFileExists(self):
return self.system.pathExists(self._tagFileName)
def __getFreebaseTags(self, query):
try:
for context, values in self.freebaseAdapter.execute(query).iteritems():
for value in values:
- yield Tag(value, context)
+ # without the decode/encode operations fuse refuses to show
+ # directory entries which are based on freebase data
+ yield Tag(value.decode('ascii', 'ignore').encode('ascii'), context)
except Exception as e:
logging.error('Failed to execute freebase query %s: %s', query, e)
def __parseTags(self):
tagFileName = self._tagFileName
for rawTag in self.parseTagsFromFile(self.system, tagFileName):
if(rawTag.context == '_freebase'):
query = self.freebaseQueryParser.parse(rawTag.value)
for tag in self.__getFreebaseTags(query):
yield tag
else:
yield rawTag
@property
@cache
def tagsCreationTime(self):
if not self.tagFileExists:
return None
return os.path.getctime(self._tagFileName)
@property
@cache
def tagsModificationTime(self):
"""Returns the last time when the tags have been modified.
"""
if not self.tagFileExists:
return None
return os.path.getmtime(self._tagFileName)
@property
@cache
def tags(self):
"""Returns the tags as a list for this item.
"""
if not self.tagFileExists:
return None
tags = list(self.__parseTags())
def getValue(context):
for tag in tags:
if(tag.context == context):
return tag.value
raise NoSuchTagValue()
queryFactory = freebase_support.GenericQueryFactory(getValue)
for genericQuery in self.genericFreebaseQueries:
try:
query = queryFactory.createQuery(genericQuery.queryObject)
for tag in self.__getFreebaseTags(freebase_support.Query(query)):
tags.append(tag)
except NoSuchTagValue:
pass
return tags
@property
def values(self):
for t in self.tags:
yield t.value
def getTagsByContext(self, context):
for t in self.tags:
if context != t.context:
continue
yield t
def getValuesByContext(self, context):
return [t.value for t in self.getTagsByContext(context)]
def getValueByContext(self, context):
values = self.getValuesByContext(context)
valuesLen = len(values)
if(valuesLen == 0):
return None
if(valuesLen == 1):
return values[0]
raise Exception('Too many values found for context %s' % (context,))
def isTaggedWithContextValue(self, context, value):
for t in self.getTagsByContext(context):
if value == t.value:
return True
return False
def isTaggedWithContext(self, context):
# TODO don't create whole list... just check wheather list is empty
return (len([c for c in self.getTagsByContext(context)]) > 0)
def isTaggedWithValue(self, value):
for v in self.values:
if value == v:
return True
return False
@property
def tagged(self):
return self.tagFileExists
def __repr__(self):
return '<Item %s, %s>' % (self.name, self.tags)
class ItemAccess(object):
"""This is the access point to the Items.
"""
def __init__(self, system, dataDirectory, tagFileName, freebaseQueryParser, freebaseAdapter, genericFreebaseQueries):
self.system = system
self.dataDirectory = dataDirectory
self.tagFileName = tagFileName
self.freebaseQueryParser = freebaseQueryParser
self.freebaseAdapter = freebaseAdapter
self.genericFreebaseQueries = genericFreebaseQueries
self.parseTime = 0
def __parseItems(self):
items = {}
logging.debug('Start parsing items from dir: %s', self.dataDirectory)
for itemName in os.listdir(self.dataDirectory):
if itemName == '.tagfs':
# skip directory with configuration
continue
try:
item = Item(itemName, self.system, self, self.freebaseQueryParser, self.freebaseAdapter, self.genericFreebaseQueries)
items[itemName] = item
except IOError, (error, strerror):
logging.error('Can \'t read tags for item %s: %s',
itemName,
strerror)
logging.debug('Found %s items', len(items))
self.parseTime = time.time()
return items
@property
@cache
def items(self):
return self.__parseItems()
@property
@cache
def tags(self):
tags = set()
for item in self.items.itervalues():
if not item.tagged:
continue
tags = tags | item.tags
return tags
@property
@cache
def taggedItems(self):
return set([item for item in self.items.itervalues() if item.tagged])
@property
@cache
def untaggedItems(self):
return set([item for item in self.items.itervalues() if not item.tagged])
def getItemDirectory(self, item):
return os.path.join(self.dataDirectory, item)
def contextTags(self, context):
contextTags = set()
for tag in self.tags:
if tag.context == context:
contextTags.add(tag)
return contextTags
@property
@cache
def contexts(self):
contexts = set()
for tag in self.tags:
if tag.context == None:
continue
contexts.add(tag.context)
return contexts
@property
@cache
def values(self):
values = set()
for tag in self.tags:
values.add(tag.value)
return values
def __str__(self):
return '[' + ', '.join([field + ': ' + str(self.__dict__[field]) for field in ['dataDirectory', 'tagFileName']]) + ']'
diff --git a/src/modules/tagfs/view.py b/src/modules/tagfs/view.py
index e988a54..bb89462 100644
--- a/src/modules/tagfs/view.py
+++ b/src/modules/tagfs/view.py
@@ -1,176 +1,174 @@
#!/usr/bin/env python
#
# Copyright 2009, 2010 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
import errno
import logging
import os
from log import logCall, logException
from cache import cache
from transient_dict import TransientDict
from node_root import RootDirectoryNode
from fuse import Direntry
class View(object):
"""Abstraction layer from fuse API.
This class is an abstraction layer from the fuse API. This should ease
writing test cases for the file system.
"""
DEFAULT_NODES = {
# directory icons for rox filer
'.DirIcon': None,
# launch script for rox filer application directories
'AppRun': None
}
def __init__(self, itemAccess, config):
self.itemAccess = itemAccess
self.config = config
self._entryCache = TransientDict(100)
@property
@cache
def rootNode(self):
return RootDirectoryNode(self.itemAccess, self.config)
def getNode(self, path):
if path in self._entryCache:
# simple path name based caching is implemented here
logging.debug('tagfs _entryCache hit')
return self._entryCache[path]
# ps contains the path segments
ps = [x for x in os.path.normpath(path).split(os.sep) if x != '']
psLen = len(ps)
if psLen > 0:
lastSegment = ps[psLen - 1]
if lastSegment in View.DEFAULT_NODES:
logging.debug('Using default node for path ' + path)
return View.DEFAULT_NODES[lastSegment]
e = self.rootNode
for pe in path.split('/')[1:]:
if pe == '':
continue
entries = e.entries
if not pe in entries:
# it seems like we are trying to fetch a node for an illegal
# path
return None
e = entries[pe]
logging.debug('tagfs _entryCache miss')
self._entryCache[path] = e
return e
@logCall
def getattr(self, path):
e = self.getNode(path)
if not e:
logging.debug('Try to read attributes from not existing node: ' + path)
return -errno.ENOENT
return e.attr
@logCall
def readdir(self, path, offset):
e = self.getNode(path)
if not e:
logging.warn('Try to read not existing directory: ' + path)
return -errno.ENOENT
# TODO care about offset parameter
- # without the decode/encode operations fuse refuses to show directory
- # entries which are based on freebase data
- return [Direntry(name.decode('ascii', 'ignore').encode('ascii')) for name in e.entries.iterkeys()]
+ return [Direntry(name) for name in e.entries.iterkeys()]
@logCall
def readlink(self, path):
n = self.getNode(path)
if not n:
logging.warn('Try to read not existing link from node: ' + path)
return -errno.ENOENT
return n.link
@logCall
def symlink(self, path, linkPath):
linkPathSegs = linkPath.split('/')
n = self.getNode('/'.join(linkPathSegs[0:len(linkPathSegs) - 2]))
if not n:
return -errno.ENOENT
return n.symlink(path, linkPath)
@logCall
def open(self, path, flags):
n = self.getNode(path)
if not n:
logging.warn('Try to open not existing node: ' + path)
return -errno.ENOENT
return n.open(path, flags)
@logCall
def read(self, path, len, offset):
n = self.getNode(path)
if not n:
logging.warn('Try to read from not existing node: ' + path)
return -errno.ENOENT
return n.read(path, len, offset)
@logCall
def write(self, path, data, pos):
n = self.getNode(path)
if not n:
logging.warn('Try to write to not existing node: ' + path)
return -errno.ENOENT
return n.write(path, data, pos)
diff --git a/test/e2e/umlauteItems/assert b/test/e2e/umlauteItems/assert
index 6311d12..5ca400c 100755
--- a/test/e2e/umlauteItems/assert
+++ b/test/e2e/umlauteItems/assert
@@ -1,4 +1,6 @@
#!/bin/bash
assertDir "$TEST_MOUNT_DIR/.any_context/umlaut"
assertDir "$TEST_MOUNT_DIR/.any_context/umlaut/äöü"
+
+ls "$TEST_MOUNT_DIR/.any_context/umlaut"
|
marook/tagfs
|
b8d29adadc703fc1023a8c5ed1984ff42c4d0d52
|
added generic freebase query demo to README
|
diff --git a/README b/README
index c485db6..5dbda0b 100644
--- a/README
+++ b/README
@@ -1,202 +1,216 @@
tagfs - tag file system
1) Introduction
2) Requirements
3) Installation
4) Tagging Files
5) Usage
6) Configuration
6.1) Options
6.1.1) tagFileName
6.1.2) enableValueFilters
6.1.3) enableRootItemLinks
7) Freebase Integration
8) Bugs
9) Further Reading
10) Contact
---------------------------------------------------------------------
Introduction
tagfs is used to organize your files using tags.
This document contains basic usage instructions for users. To develop or debug
tagfs see the README.dev file.
---------------------------------------------------------------------
Requirements
* python 2.5, 2.6, 2.7
* Linux kernel with fuse enabled
* python-fuse installed
---------------------------------------------------------------------
Installation
To install tagfs into your home directory type the following:
$ python setup.py test e2e_test install --home ~/.local
If you haven't already extended your local python path then add the following
to your environment configuration script. For example to your ~/.bashrc:
$ export PYTHONPATH=~/.local/lib/python:$PYTHONPATH
You may also need to add ~/.local/bin to your PATH environment variable:
$ export PATH=~/.local/bin:$PATH
---------------------------------------------------------------------
Tagging Files
Before you can filter anything using tagfs you need to tag your items. An item
is a directory which contains a file called .tag. All items must be below one
directory.
Let's create a simple item structure.
First we create the root directory for all items:
$ mkdir items
Then we create our first item:
$ mkdir items/Ted
We tag the 'Ted' item as movie:
$ echo movie >> items/Ted/.tag
We also tag 'Ted' as genre comedy:
$ echo 'genre: comedy' >> items/Ted/.tag
Then we add a second item:
$ mkdir items/banana
$ echo fruit >> items/banana/.tag
$ echo 'genre: delicious' >> items/banana/.tag
Modifying .tag files using echo, grep, sed may be a little hard sometimes.
There are some convenience scripts available through the tagfs-utils project.
See https://github.com/marook/tagfs-utils for details.
---------------------------------------------------------------------
Usage
After installation tagfs can be started the following way.
Mount a tagged directory:
$ tagfs -i /path/to/my/items/directory /path/to/my/mount/point
Unmount a tagged directory:
$ fusermount -u /path/to/my/mount/point
Right now tagfs reads the taggings only when it's getting mounted. So if you
modify the tags after mounting you will not see any changes in the tagfs file
system.
In general tagfs will try to reduce the number of filter directories below the
virtual file system. That's why you may not see some filters which would not
reduce the number of selected items.
---------------------------------------------------------------------
Configuration
tagfs can be configured through configuration files. Configuration files are
searched in different locations by tagfs. The following locations are used.
Locations with higher priority come first:
- <items directory>/.tagfs/tagfs.conf
- ~/.tagfs/tagfs.conf
- /etc/tagfs/tagfs.conf
Right now the following configuration options are supported.
---------------------------------------------------------------------
Configuration - Options - tagFileName
Through this option the name of the parsed tag files can be specified. The
default value is '.tag'.
Example:
[global]
tagFileName = ABOUT
---------------------------------------------------------------------
Configuration - Options - enableValueFilters
You can enable or disable value filters. If you enable value filters you will
see filter directories for each tag value. For value filters the tag's
context can be anyone. The default value is 'false'.
Example:
[global]
enableValueFilters = true
---------------------------------------------------------------------
Configuration - Options - enableRootItemLinks
To show links to all items in the tagfs '/' directory enable this option. The
default value is 'false'.
Example:
[global]
enableRootItemLinks = true
---------------------------------------------------------------------
Freebase Integration
Freebase is an open graph of people, places and things. See
http://www.freebase.com/ for details. tagfs allows you to extend your own
taggings with data directly from the freebase graph.
WARNING! Freebase support is currently experimental. It is very likely that the
freebase syntax within the .tag files will change in future releases of tagfs.
In order to use freebase you need to install the freebase-python bindings. They
are available via https://code.google.com/p/freebase-python/
To extend an item's taggings with freebase data you have to add a freebase query
to the item's .tag file. Here's an example:
_freebase: {"id": "/m/0clpml", "type": "/fictional_universe/fictional_character", "name": null, "occupation": null}
tagfs uses the freebase MQL query format which is described below the following
link http://wiki.freebase.com/wiki/MQL
The query properties with null values are added as context/tag pairs to the
.tag file's item.
Generic freebase mappings for all items can be specified in the file
-'<items directory>/.tagfs/freebase'. Every line is one freebase query.
+'<items directory>/.tagfs/freebase'. Every line is one freebase query. You can
+reference tagged values via the '$' operator. Here's an example MQL query with
+some demo .tag files:
+
+<items directory>/.tagfs/freebase:
+{"type": "/film/film", "name": "$name", "genre": null, "directed_by": null}
+
+<items directory>/Ted/.tag:
+name: Ted
+
+<items directory>/Family Guy/.tag:
+name: Family Guy
+
+When mounting this example the genre and director will be fetched from freebase
+and made available as filtering directories.
---------------------------------------------------------------------
Bugs
Viewing existing and reporting new bugs can be done via the github issue
tracker:
https://github.com/marook/tagfs/issues
---------------------------------------------------------------------
Further Reading
Using a file system for my bank account (Markus Pielmeier)
http://pielmeier.blogspot.com/2010/08/using-file-system-for-my-bank-account.html
---------------------------------------------------------------------
Contact
* homepage: http://wiki.github.com/marook/tagfs
* user group: http://groups.google.com/group/tagfs
* author: Markus Pielmeier <[email protected]>
|
marook/tagfs
|
41e948c07fc3bb8fd53225441c38a201488e2d31
|
implemented freebase query inheritance
|
diff --git a/README b/README
index 94b75fb..c485db6 100644
--- a/README
+++ b/README
@@ -1,199 +1,202 @@
tagfs - tag file system
1) Introduction
2) Requirements
3) Installation
4) Tagging Files
5) Usage
6) Configuration
6.1) Options
6.1.1) tagFileName
6.1.2) enableValueFilters
6.1.3) enableRootItemLinks
7) Freebase Integration
8) Bugs
9) Further Reading
10) Contact
---------------------------------------------------------------------
Introduction
tagfs is used to organize your files using tags.
This document contains basic usage instructions for users. To develop or debug
tagfs see the README.dev file.
---------------------------------------------------------------------
Requirements
* python 2.5, 2.6, 2.7
* Linux kernel with fuse enabled
* python-fuse installed
---------------------------------------------------------------------
Installation
To install tagfs into your home directory type the following:
$ python setup.py test e2e_test install --home ~/.local
If you haven't already extended your local python path then add the following
to your environment configuration script. For example to your ~/.bashrc:
$ export PYTHONPATH=~/.local/lib/python:$PYTHONPATH
You may also need to add ~/.local/bin to your PATH environment variable:
$ export PATH=~/.local/bin:$PATH
---------------------------------------------------------------------
Tagging Files
Before you can filter anything using tagfs you need to tag your items. An item
is a directory which contains a file called .tag. All items must be below one
directory.
Let's create a simple item structure.
First we create the root directory for all items:
$ mkdir items
Then we create our first item:
$ mkdir items/Ted
We tag the 'Ted' item as movie:
$ echo movie >> items/Ted/.tag
We also tag 'Ted' as genre comedy:
$ echo 'genre: comedy' >> items/Ted/.tag
Then we add a second item:
$ mkdir items/banana
$ echo fruit >> items/banana/.tag
$ echo 'genre: delicious' >> items/banana/.tag
Modifying .tag files using echo, grep, sed may be a little hard sometimes.
There are some convenience scripts available through the tagfs-utils project.
See https://github.com/marook/tagfs-utils for details.
---------------------------------------------------------------------
Usage
After installation tagfs can be started the following way.
Mount a tagged directory:
$ tagfs -i /path/to/my/items/directory /path/to/my/mount/point
Unmount a tagged directory:
$ fusermount -u /path/to/my/mount/point
Right now tagfs reads the taggings only when it's getting mounted. So if you
modify the tags after mounting you will not see any changes in the tagfs file
system.
In general tagfs will try to reduce the number of filter directories below the
virtual file system. That's why you may not see some filters which would not
reduce the number of selected items.
---------------------------------------------------------------------
Configuration
tagfs can be configured through configuration files. Configuration files are
searched in different locations by tagfs. The following locations are used.
Locations with higher priority come first:
- <items directory>/.tagfs/tagfs.conf
- ~/.tagfs/tagfs.conf
- /etc/tagfs/tagfs.conf
Right now the following configuration options are supported.
---------------------------------------------------------------------
Configuration - Options - tagFileName
Through this option the name of the parsed tag files can be specified. The
default value is '.tag'.
Example:
[global]
tagFileName = ABOUT
---------------------------------------------------------------------
Configuration - Options - enableValueFilters
You can enable or disable value filters. If you enable value filters you will
see filter directories for each tag value. For value filters the tag's
context can be anyone. The default value is 'false'.
Example:
[global]
enableValueFilters = true
---------------------------------------------------------------------
Configuration - Options - enableRootItemLinks
To show links to all items in the tagfs '/' directory enable this option. The
default value is 'false'.
Example:
[global]
enableRootItemLinks = true
---------------------------------------------------------------------
Freebase Integration
Freebase is an open graph of people, places and things. See
http://www.freebase.com/ for details. tagfs allows you to extend your own
taggings with data directly from the freebase graph.
WARNING! Freebase support is currently experimental. It is very likely that the
freebase syntax within the .tag files will change in future releases of tagfs.
In order to use freebase you need to install the freebase-python bindings. They
are available via https://code.google.com/p/freebase-python/
To extend an item's taggings with freebase data you have to add a freebase query
to the item's .tag file. Here's an example:
_freebase: {"id": "/m/0clpml", "type": "/fictional_universe/fictional_character", "name": null, "occupation": null}
tagfs uses the freebase MQL query format which is described below the following
link http://wiki.freebase.com/wiki/MQL
The query properties with null values are added as context/tag pairs to the
.tag file's item.
+Generic freebase mappings for all items can be specified in the file
+'<items directory>/.tagfs/freebase'. Every line is one freebase query.
+
---------------------------------------------------------------------
Bugs
Viewing existing and reporting new bugs can be done via the github issue
tracker:
https://github.com/marook/tagfs/issues
---------------------------------------------------------------------
Further Reading
Using a file system for my bank account (Markus Pielmeier)
http://pielmeier.blogspot.com/2010/08/using-file-system-for-my-bank-account.html
---------------------------------------------------------------------
Contact
* homepage: http://wiki.github.com/marook/tagfs
* user group: http://groups.google.com/group/tagfs
* author: Markus Pielmeier <[email protected]>
diff --git a/src/modules/tagfs/freebase_support.py b/src/modules/tagfs/freebase_support.py
index 24b13cf..e285d80 100644
--- a/src/modules/tagfs/freebase_support.py
+++ b/src/modules/tagfs/freebase_support.py
@@ -1,86 +1,128 @@
#
# Copyright 2012 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
def createFreebaseAdapter():
# freebase is an optional dependency. tagfs should execute even if it's not
# available.
try:
import freebase
logging.info('freebase support enabled')
return FreebaseAdapter()
except ImportError:
logging.warn('freebase support disabled')
return FreebaseAdapterStub()
class FreebaseAdapterStub(object):
def execute(self, *args, **kwargs):
return {}
class FreebaseAdapter(object):
def execute(self, query):
import freebase
fbResult = freebase.mqlread(query.freebaseQuery)
result = {}
for key in query.selectedKeys:
result[key] = fbResult[key]
return result
class Query(object):
def __init__(self, queryObject):
self.queryObject = queryObject
@property
def freebaseQuery(self):
q = {}
for key, value in self.queryObject.iteritems():
if(value is None):
q[key] = []
else:
q[key] = value
return q
@property
def queryString(self):
# TODO this func is only used in tests => remove
return json.dumps(self.freebaseQuery, separators = (',', ':'))
@property
def selectedKeys(self):
for key, value in self.queryObject.iteritems():
if(value is None):
yield key
class QueryParser(object):
def parse(self, queryString):
return Query(json.loads(queryString))
+
+class QueryFileParser(object):
+
+ def __init__(self, system, queryParser):
+ self.system = system
+ self.queryParser = queryParser
+
+ def parseFile(self, path):
+ with self.system.open(path, 'r') as f:
+ for line in f:
+ yield self.queryParser.parse(line)
+
+class GenericQueryFactory(object):
+
+ def __init__(self, resolveVar):
+ self.resolveVar = resolveVar
+
+ def evaluate(self, value):
+ if(value is None):
+ return None
+
+ valueLen = len(value)
+
+ if(valueLen < 2):
+ return value
+
+ if(value[0] != '$'):
+ return value
+
+ key = value[1:]
+
+ return self.resolveVar(key)
+
+ def createQuery(self, genericQuery):
+ q = {}
+
+ for key, genericValue in genericQuery.iteritems():
+ value = self.evaluate(genericValue)
+
+ q[key] = value
+
+ return q
diff --git a/src/modules/tagfs/item_access.py b/src/modules/tagfs/item_access.py
index af2a9e3..6af7d7d 100644
--- a/src/modules/tagfs/item_access.py
+++ b/src/modules/tagfs/item_access.py
@@ -1,303 +1,351 @@
#
# Copyright 2009 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import time
import traceback
from cache import cache
import sysIO
+import freebase_support
class Tag(object):
def __init__(self, value, context = None):
if context == None:
self.context = None
else:
self.context = context.strip()
self.value = value.strip()
if not self.context == None and len(self.context) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
if len(self.value) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
def __hash__(self):
return (self.context, self.value).__hash__()
def __eq__(self, other):
return self.value == other.value and self.context == other.context
def __repr__(self):
return '<Tag %s: %s>' % (self.context, self.value)
def parseTagsFromFile(system, tagFileName):
"""Parses the tags from the specified file.
@return: The parsed values are returned as a set containing Tag objects.
@see: Tag
"""
tags = set()
with system.open(tagFileName, 'r') as tagFile:
for rawTag in tagFile:
rawTag = rawTag.strip()
try:
if len(rawTag) == 0:
continue
tagTuple = rawTag.split(':', 1)
if len(tagTuple) == 1:
tagContext = None
tagValue = tagTuple[0]
else:
tagContext = tagTuple[0]
tagValue = tagTuple[1]
tag = Tag(tagValue, context = tagContext)
tags.add(tag)
except:
logging.warning('Skipping tagging \'%s\' from file \'%s\' as it can\'t be parsed\n%s.' % (rawTag, tagFileName, traceback.format_exc()))
return tags
+class NoSuchTagValue(Exception):
+
+ pass
+
class Item(object):
- def __init__(self, name, system, itemAccess, freebaseQueryParser, freebaseAdapter, parseTagsFromFile = parseTagsFromFile):
+ def __init__(self, name, system, itemAccess, freebaseQueryParser, freebaseAdapter, genericFreebaseQueries = [], parseTagsFromFile = parseTagsFromFile):
self.name = name
self.system = system
self.itemAccess = itemAccess
self.freebaseQueryParser = freebaseQueryParser
self.freebaseAdapter = freebaseAdapter
self.parseTagsFromFile = parseTagsFromFile
+ self.genericFreebaseQueries = genericFreebaseQueries
# TODO register at file system to receive tag file change events.
@property
@cache
def itemDirectory(self):
return os.path.join(self.itemAccess.dataDirectory, self.name)
@property
@cache
def _tagFileName(self):
"""Returns the name of the tag file for this item.
"""
return os.path.join(self.itemDirectory, self.itemAccess.tagFileName)
@property
@cache
def tagFileExists(self):
return self.system.pathExists(self._tagFileName)
+
+ def __getFreebaseTags(self, query):
+ try:
+ for context, values in self.freebaseAdapter.execute(query).iteritems():
+ for value in values:
+ yield Tag(value, context)
+ except Exception as e:
+ logging.error('Failed to execute freebase query %s: %s', query, e)
def __parseTags(self):
tagFileName = self._tagFileName
for rawTag in self.parseTagsFromFile(self.system, tagFileName):
if(rawTag.context == '_freebase'):
query = self.freebaseQueryParser.parse(rawTag.value)
- for context, values in self.freebaseAdapter.execute(query).iteritems():
- for value in values:
- yield Tag(value, context)
+ for tag in self.__getFreebaseTags(query):
+ yield tag
else:
yield rawTag
@property
@cache
def tagsCreationTime(self):
if not self.tagFileExists:
return None
return os.path.getctime(self._tagFileName)
@property
@cache
def tagsModificationTime(self):
"""Returns the last time when the tags have been modified.
"""
if not self.tagFileExists:
return None
return os.path.getmtime(self._tagFileName)
@property
@cache
def tags(self):
"""Returns the tags as a list for this item.
"""
if not self.tagFileExists:
return None
- return list(self.__parseTags())
+ tags = list(self.__parseTags())
+
+ def getValue(context):
+ for tag in tags:
+ if(tag.context == context):
+ return tag.value
+
+ raise NoSuchTagValue()
+
+ queryFactory = freebase_support.GenericQueryFactory(getValue)
+ for genericQuery in self.genericFreebaseQueries:
+ try:
+ query = queryFactory.createQuery(genericQuery.queryObject)
+
+ for tag in self.__getFreebaseTags(freebase_support.Query(query)):
+ tags.append(tag)
+ except NoSuchTagValue:
+ pass
+
+ return tags
@property
def values(self):
for t in self.tags:
yield t.value
def getTagsByContext(self, context):
for t in self.tags:
if context != t.context:
continue
yield t
+ def getValuesByContext(self, context):
+ return [t.value for t in self.getTagsByContext(context)]
+
+ def getValueByContext(self, context):
+ values = self.getValuesByContext(context)
+ valuesLen = len(values)
+
+ if(valuesLen == 0):
+ return None
+
+ if(valuesLen == 1):
+ return values[0]
+
+ raise Exception('Too many values found for context %s' % (context,))
+
def isTaggedWithContextValue(self, context, value):
for t in self.getTagsByContext(context):
if value == t.value:
return True
return False
def isTaggedWithContext(self, context):
# TODO don't create whole list... just check wheather list is empty
return (len([c for c in self.getTagsByContext(context)]) > 0)
def isTaggedWithValue(self, value):
for v in self.values:
if value == v:
return True
return False
@property
def tagged(self):
return self.tagFileExists
def __repr__(self):
return '<Item %s, %s>' % (self.name, self.tags)
class ItemAccess(object):
"""This is the access point to the Items.
"""
- def __init__(self, system, dataDirectory, tagFileName, freebaseQueryParser, freebaseAdapter):
+ def __init__(self, system, dataDirectory, tagFileName, freebaseQueryParser, freebaseAdapter, genericFreebaseQueries):
self.system = system
self.dataDirectory = dataDirectory
self.tagFileName = tagFileName
self.freebaseQueryParser = freebaseQueryParser
self.freebaseAdapter = freebaseAdapter
+ self.genericFreebaseQueries = genericFreebaseQueries
self.parseTime = 0
def __parseItems(self):
items = {}
logging.debug('Start parsing items from dir: %s', self.dataDirectory)
for itemName in os.listdir(self.dataDirectory):
if itemName == '.tagfs':
# skip directory with configuration
continue
try:
- item = Item(itemName, self.system, self, self.freebaseQueryParser, self.freebaseAdapter)
+ item = Item(itemName, self.system, self, self.freebaseQueryParser, self.freebaseAdapter, self.genericFreebaseQueries)
items[itemName] = item
except IOError, (error, strerror):
logging.error('Can \'t read tags for item %s: %s',
itemName,
strerror)
logging.debug('Found %s items', len(items))
self.parseTime = time.time()
return items
@property
@cache
def items(self):
return self.__parseItems()
@property
@cache
def tags(self):
tags = set()
for item in self.items.itervalues():
if not item.tagged:
continue
tags = tags | item.tags
return tags
@property
@cache
def taggedItems(self):
return set([item for item in self.items.itervalues() if item.tagged])
@property
@cache
def untaggedItems(self):
return set([item for item in self.items.itervalues() if not item.tagged])
def getItemDirectory(self, item):
return os.path.join(self.dataDirectory, item)
def contextTags(self, context):
contextTags = set()
for tag in self.tags:
if tag.context == context:
contextTags.add(tag)
return contextTags
@property
@cache
def contexts(self):
contexts = set()
for tag in self.tags:
if tag.context == None:
continue
contexts.add(tag.context)
return contexts
@property
@cache
def values(self):
values = set()
for tag in self.tags:
values.add(tag.value)
return values
def __str__(self):
return '[' + ', '.join([field + ': ' + str(self.__dict__[field]) for field in ['dataDirectory', 'tagFileName']]) + ']'
diff --git a/src/modules/tagfs/main.py b/src/modules/tagfs/main.py
index 96bc84b..e8bab8b 100644
--- a/src/modules/tagfs/main.py
+++ b/src/modules/tagfs/main.py
@@ -1,183 +1,197 @@
#!/usr/bin/env python
#
# Copyright 2009, 2010 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
#
# = tag fs =
# == glossary ==
# * item: An item is a directory in the item container directory. Items can be
# tagged using a tag file.
# * tag: A tag is a text string which can be assigned to an item. Tags can
# consist of any character except newlines.
import os
import stat
import errno
import exceptions
import time
import functools
import logging
import fuse
if not hasattr(fuse, '__version__'):
raise RuntimeError, \
"your fuse-py doesn't know of fuse.__version__, probably it's too old."
fuse.fuse_python_api = (0, 2)
from view import View
from cache import cache
from item_access import ItemAccess
from config import parseConfig
from log import logException
import sysIO
import freebase_support
class TagFS(fuse.Fuse):
def __init__(self, initwd, *args, **kw):
fuse.Fuse.__init__(self, *args, **kw)
self._initwd = initwd
self._itemsRoot = None
+ self.system = sysIO.createSystem()
+
# TODO change command line arguments structure
# goal: tagfs <items dir> <mount dir>
self.parser.add_option('-i',
'--items-dir',
dest = 'itemsDir',
help = 'items directory',
metavar = 'dir')
self.parser.add_option('-t',
'--tag-file',
dest = 'tagFileName',
help = 'tag file name',
metavar = 'file',
default = None)
self.parser.add_option('--value-filter',
action = 'store_true',
dest = 'enableValueFilters',
help = 'Displays value filter directories on toplevel instead of only context entries',
default = None)
self.parser.add_option('--root-items',
action = 'store_true',
dest = 'enableRootItemLinks',
help = 'Display item links in tagfs root directory.',
default = None)
+ def parseGenericFreebaseQueries(self, itemsRoot):
+ freebaseQueriesFilePath = os.path.join(itemsRoot, '.tagfs', 'freebase')
+
+ if(not os.path.exists(freebaseQueriesFilePath)):
+ return []
+
+ queries = list(freebase_support.QueryFileParser(self.system, freebase_support.QueryParser()).parseFile(freebaseQueriesFilePath))
+
+ logging.info('Parsed %s generic freebase queries', len(queries))
+
+ return queries
+
def getItemAccess(self):
# Maybe we should move the parser run from main here.
# Or we should at least check if it was run once...
opts, args = self.cmdline
# Maybe we should add expand user? Maybe even vars???
assert opts.itemsDir != None and opts.itemsDir != ''
itemsRoot = os.path.normpath(
os.path.join(self._initwd, opts.itemsDir))
# TODO rel https://github.com/marook/tagfs/issues#issue/2
# Ensure that mount-point and items dir are disjoined.
# Something along
# assert not os.path.normpath(itemsDir).startswith(itemsRoot)
-
+
# try/except here?
try:
- return ItemAccess(sysIO.createSystem(), itemsRoot, self.config.tagFileName, freebase_support.QueryParser(), freebase_support.createFreebaseAdapter())
+ return ItemAccess(self.system, itemsRoot, self.config.tagFileName, freebase_support.QueryParser(), freebase_support.createFreebaseAdapter(), self.parseGenericFreebaseQueries(itemsRoot))
except OSError, e:
logging.error("Can't create item access from items directory %s. Reason: %s",
itemsRoot, str(e.strerror))
raise
@property
@cache
def config(self):
opts, args = self.cmdline
c = parseConfig(os.path.normpath(os.path.join(self._initwd, opts.itemsDir)))
if opts.tagFileName:
c.tagFileName = opts.tagFileName
if opts.enableValueFilters:
c.enableValueFilters = opts.enableValueFilters
if opts.enableRootItemLinks:
c.enableRootItemLinks = opts.enableRootItemLinks
logging.debug('Using configuration %s' % c)
return c
@property
@cache
def view(self):
itemAccess = self.getItemAccess()
return View(itemAccess, self.config)
@logException
def getattr(self, path):
return self.view.getattr(path)
@logException
def readdir(self, path, offset):
return self.view.readdir(path, offset)
@logException
def readlink(self, path):
return self.view.readlink(path)
@logException
def open(self, path, flags):
return self.view.open(path, flags)
@logException
def read(self, path, size, offset):
return self.view.read(path, size, offset)
@logException
def write(self, path, data, pos):
return self.view.write(path, data, pos)
@logException
def symlink(self, path, linkPath):
return self.view.symlink(path, linkPath)
def main():
fs = TagFS(os.getcwd(),
version = "%prog " + fuse.__version__,
dash_s_do = 'setsingle')
fs.parse(errex = 1)
opts, args = fs.cmdline
if opts.itemsDir == None:
fs.parser.print_help()
# items dir should probably be an arg, not an option.
print "Error: Missing items directory option."
# Quickfix rel https://github.com/marook/tagfs/issues/#issue/3
# FIXME: since we run main via sys.exit(main()), this should
# probably be handled via some return code.
import sys
sys.exit()
return fs.main()
if __name__ == '__main__':
import sys
sys.exit(main())
diff --git a/src/test/tagfs_test_small/test_freebase_support_genericQueryFactory.py b/src/test/tagfs_test_small/test_freebase_support_genericQueryFactory.py
new file mode 100644
index 0000000..3566cb0
--- /dev/null
+++ b/src/test/tagfs_test_small/test_freebase_support_genericQueryFactory.py
@@ -0,0 +1,56 @@
+#
+# Copyright 2013 Markus Pielmeier
+#
+# This file is part of tagfs.
+#
+# tagfs is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# tagfs is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
+
+import unittest
+import tagfs.freebase_support as freebase_support
+
+class WhenGenericQueryFactoryWithVariables(unittest.TestCase):
+
+ def resolveVar(self, name):
+ return self.variables[name]
+
+ def setUp(self):
+ super(WhenGenericQueryFactoryWithVariables, self).setUp()
+
+ self.variables = {}
+ self.factory = freebase_support.GenericQueryFactory(self.resolveVar)
+
+ self.varValue = 'value'
+ self.variables['var'] = self.varValue
+
+ def testResolveExistingVariable(self):
+ q = {'key': '$var',}
+
+ self.assertEqual(self.factory.createQuery(q), {'key': self.varValue,})
+
+ def testCreatedQueryIsNewInstance(self):
+ q = {}
+
+ self.assertTrue(not q is self.factory.createQuery(q))
+
+ def testGenericQueryIsUntouched(self):
+ q = {'key': '$var',}
+
+ self.factory.createQuery(q)
+
+ self.assertEqual(q, {'key': '$var',})
+
+ def testResolveValueToNone(self):
+ q = {'key': None,}
+
+ self.assertEqual(self.factory.createQuery(q), q)
diff --git a/src/test/tagfs_test_small/test_freebase_support_queryFileParser.py b/src/test/tagfs_test_small/test_freebase_support_queryFileParser.py
new file mode 100644
index 0000000..3b9c07f
--- /dev/null
+++ b/src/test/tagfs_test_small/test_freebase_support_queryFileParser.py
@@ -0,0 +1,42 @@
+#
+# Copyright 2013 Markus Pielmeier
+#
+# This file is part of tagfs.
+#
+# tagfs is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# tagfs is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
+
+import unittest
+import tagfs.freebase_support as freebase_support
+import systemMocks
+
+class QueryParserMock(object):
+
+ def parse(self, queryString):
+ return 'rule'
+
+class WhenFileWithOneLineExists(unittest.TestCase):
+
+ def setUp(self):
+ super(WhenFileWithOneLineExists, self).setUp()
+
+ self.filePath = '/path/to/my/file'
+
+ self.system = systemMocks.SystemMock(self)
+ self.system.readFiles[self.filePath] = systemMocks.ReadLineFileMock(['line1',])
+
+ self.queryParser = QueryParserMock()
+ self.queryFileParser = freebase_support.QueryFileParser(self.system, self.queryParser)
+
+ def testThenParsesOneLine(self):
+ self.assertEqual(list(self.queryFileParser.parseFile(self.filePath)), ['rule',])
|
marook/tagfs
|
79970ac8c31e46b1b94224aa6b5d5d263e9acffe
|
added roadmap section to README.dev
|
diff --git a/README.dev b/README.dev
index 8f88b97..c6d270c 100644
--- a/README.dev
+++ b/README.dev
@@ -1,95 +1,103 @@
tagfs - tag file system
developer readme
-1) Logging
-2) Profiling
-3) Tracing
-4) Distribution
-4.1) tar Distribution
-5) Tests
-6) Code Coverage
-7) End-To-End Tests
+1) Roadmap
+2) Logging
+3) Profiling
+4) Tracing
+5) Distribution
+5.1) tar Distribution
+6) Tests
+7) Code Coverage
+8) End-To-End Tests
+
+
+---------------------------------------------------------------------
+Roadmap
+
+The upcomming tagfs features are listed in the 'backlog' file. The file is
+best viewed using emacs org-mode.
---------------------------------------------------------------------
Logging
You can enable logging by setting a debug environment variable before you
launch tagfs:
$ export DEBUG=1
tagfs will log to the console and the file /tmp/tagfs.log
---------------------------------------------------------------------
Profiling
You can enable profiling by setting a profile environment variable before you
launch tagfs:
$ export PROFILE=1
After unmounting your tagfs file system a profile file will be written. The
profile file will be written to the current directory. The profile file will
be named 'tagfs.profile'.
---------------------------------------------------------------------
Tracing
Tracing is done via the log output. There is a utility script to analyze the
log files. To analyze a log file execute the following
$ util/trace_logfiles.py /tmp/tagfs.log
The tracing script will output some statistics.
---------------------------------------------------------------------
tar Distribution
The tagfs project contains scripts for creating source distribution packages.
To create a tar distribution package you execute the following:
$ make distsnapshot
The make call will create an archive within the target directory. The created
tar file is used for tagfs source distribution.
---------------------------------------------------------------------
Tests
You can execute the test cases via the setup.py script in the project's root
directory.
$ python setup.py test
---------------------------------------------------------------------
Code Coverage
The tagfs unit tests can be executed with code coverage measurement enabled.
setup.py will measure the code coverage if the coverage lib is installed.
The coverage lib is available here: http://nedbatchelder.com/code/coverage
If you're a debian user you can try:
$ apt-get install python-coverage
The code coverage will be written below the reports directory after executing
the test cases:
$ python setup.py test
---------------------------------------------------------------------
End-To-End Tests
tagfs contains some end-to-end tests. The end-to-end tests first mount an
items directory and afterwards execute a shell script which can assert certain
conditions in the mounted tagfs.
The end-to-end tests can be run via the setup.py:
$ python setup.py e2e_test
The end-to-end tests are located below the test/e2e directory.
|
marook/tagfs
|
3def97443abfa94173412c70f844928b79c5cf08
|
added section aboug bugs to readme
|
diff --git a/README b/README
index 54c4de3..94b75fb 100644
--- a/README
+++ b/README
@@ -1,190 +1,199 @@
tagfs - tag file system
1) Introduction
2) Requirements
3) Installation
4) Tagging Files
5) Usage
6) Configuration
6.1) Options
6.1.1) tagFileName
6.1.2) enableValueFilters
6.1.3) enableRootItemLinks
7) Freebase Integration
-8) Further Reading
-9) Contact
+8) Bugs
+9) Further Reading
+10) Contact
---------------------------------------------------------------------
Introduction
tagfs is used to organize your files using tags.
This document contains basic usage instructions for users. To develop or debug
tagfs see the README.dev file.
---------------------------------------------------------------------
Requirements
* python 2.5, 2.6, 2.7
* Linux kernel with fuse enabled
* python-fuse installed
---------------------------------------------------------------------
Installation
To install tagfs into your home directory type the following:
$ python setup.py test e2e_test install --home ~/.local
If you haven't already extended your local python path then add the following
to your environment configuration script. For example to your ~/.bashrc:
$ export PYTHONPATH=~/.local/lib/python:$PYTHONPATH
You may also need to add ~/.local/bin to your PATH environment variable:
$ export PATH=~/.local/bin:$PATH
---------------------------------------------------------------------
Tagging Files
Before you can filter anything using tagfs you need to tag your items. An item
is a directory which contains a file called .tag. All items must be below one
directory.
Let's create a simple item structure.
First we create the root directory for all items:
$ mkdir items
Then we create our first item:
$ mkdir items/Ted
We tag the 'Ted' item as movie:
$ echo movie >> items/Ted/.tag
We also tag 'Ted' as genre comedy:
$ echo 'genre: comedy' >> items/Ted/.tag
Then we add a second item:
$ mkdir items/banana
$ echo fruit >> items/banana/.tag
$ echo 'genre: delicious' >> items/banana/.tag
Modifying .tag files using echo, grep, sed may be a little hard sometimes.
There are some convenience scripts available through the tagfs-utils project.
See https://github.com/marook/tagfs-utils for details.
---------------------------------------------------------------------
Usage
After installation tagfs can be started the following way.
Mount a tagged directory:
$ tagfs -i /path/to/my/items/directory /path/to/my/mount/point
Unmount a tagged directory:
$ fusermount -u /path/to/my/mount/point
Right now tagfs reads the taggings only when it's getting mounted. So if you
modify the tags after mounting you will not see any changes in the tagfs file
system.
In general tagfs will try to reduce the number of filter directories below the
virtual file system. That's why you may not see some filters which would not
reduce the number of selected items.
---------------------------------------------------------------------
Configuration
tagfs can be configured through configuration files. Configuration files are
searched in different locations by tagfs. The following locations are used.
Locations with higher priority come first:
- <items directory>/.tagfs/tagfs.conf
- ~/.tagfs/tagfs.conf
- /etc/tagfs/tagfs.conf
Right now the following configuration options are supported.
---------------------------------------------------------------------
Configuration - Options - tagFileName
Through this option the name of the parsed tag files can be specified. The
default value is '.tag'.
Example:
[global]
tagFileName = ABOUT
---------------------------------------------------------------------
Configuration - Options - enableValueFilters
You can enable or disable value filters. If you enable value filters you will
see filter directories for each tag value. For value filters the tag's
context can be anyone. The default value is 'false'.
Example:
[global]
enableValueFilters = true
---------------------------------------------------------------------
Configuration - Options - enableRootItemLinks
To show links to all items in the tagfs '/' directory enable this option. The
default value is 'false'.
Example:
[global]
enableRootItemLinks = true
---------------------------------------------------------------------
Freebase Integration
Freebase is an open graph of people, places and things. See
http://www.freebase.com/ for details. tagfs allows you to extend your own
taggings with data directly from the freebase graph.
WARNING! Freebase support is currently experimental. It is very likely that the
freebase syntax within the .tag files will change in future releases of tagfs.
In order to use freebase you need to install the freebase-python bindings. They
are available via https://code.google.com/p/freebase-python/
To extend an item's taggings with freebase data you have to add a freebase query
to the item's .tag file. Here's an example:
_freebase: {"id": "/m/0clpml", "type": "/fictional_universe/fictional_character", "name": null, "occupation": null}
tagfs uses the freebase MQL query format which is described below the following
link http://wiki.freebase.com/wiki/MQL
The query properties with null values are added as context/tag pairs to the
.tag file's item.
+---------------------------------------------------------------------
+Bugs
+
+Viewing existing and reporting new bugs can be done via the github issue
+tracker:
+https://github.com/marook/tagfs/issues
+
+
---------------------------------------------------------------------
Further Reading
Using a file system for my bank account (Markus Pielmeier)
http://pielmeier.blogspot.com/2010/08/using-file-system-for-my-bank-account.html
---------------------------------------------------------------------
Contact
* homepage: http://wiki.github.com/marook/tagfs
* user group: http://groups.google.com/group/tagfs
* author: Markus Pielmeier <[email protected]>
|
marook/tagfs
|
295249485dbb8c902c618ec2c2fe053ce98d1fd2
|
added missing import in freebase integration
|
diff --git a/src/modules/tagfs/freebase_support.py b/src/modules/tagfs/freebase_support.py
index 0b536d3..24b13cf 100644
--- a/src/modules/tagfs/freebase_support.py
+++ b/src/modules/tagfs/freebase_support.py
@@ -1,84 +1,86 @@
#
# Copyright 2012 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
def createFreebaseAdapter():
# freebase is an optional dependency. tagfs should execute even if it's not
# available.
try:
import freebase
logging.info('freebase support enabled')
return FreebaseAdapter()
except ImportError:
logging.warn('freebase support disabled')
return FreebaseAdapterStub()
class FreebaseAdapterStub(object):
def execute(self, *args, **kwargs):
return {}
class FreebaseAdapter(object):
def execute(self, query):
+ import freebase
+
fbResult = freebase.mqlread(query.freebaseQuery)
result = {}
for key in query.selectedKeys:
result[key] = fbResult[key]
return result
class Query(object):
def __init__(self, queryObject):
self.queryObject = queryObject
@property
def freebaseQuery(self):
q = {}
for key, value in self.queryObject.iteritems():
if(value is None):
q[key] = []
else:
q[key] = value
return q
@property
def queryString(self):
# TODO this func is only used in tests => remove
return json.dumps(self.freebaseQuery, separators = (',', ':'))
@property
def selectedKeys(self):
for key, value in self.queryObject.iteritems():
if(value is None):
yield key
class QueryParser(object):
def parse(self, queryString):
return Query(json.loads(queryString))
|
marook/tagfs
|
77ebfbf3582df79041b3020042b85e010c21aec0
|
added e2e test which valides umlaute
|
diff --git a/test/e2e/umlauteItems/assert b/test/e2e/umlauteItems/assert
new file mode 100755
index 0000000..6311d12
--- /dev/null
+++ b/test/e2e/umlauteItems/assert
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+assertDir "$TEST_MOUNT_DIR/.any_context/umlaut"
+assertDir "$TEST_MOUNT_DIR/.any_context/umlaut/äöü"
diff --git a/test/e2e/umlauteItems/items/aou/.tag b/test/e2e/umlauteItems/items/aou/.tag
new file mode 100644
index 0000000..9603c37
--- /dev/null
+++ b/test/e2e/umlauteItems/items/aou/.tag
@@ -0,0 +1 @@
+noumlaut
diff --git "a/test/e2e/umlauteItems/items/\303\244\303\266\303\274/.tag" "b/test/e2e/umlauteItems/items/\303\244\303\266\303\274/.tag"
new file mode 100644
index 0000000..b39b3b6
--- /dev/null
+++ "b/test/e2e/umlauteItems/items/\303\244\303\266\303\274/.tag"
@@ -0,0 +1 @@
+umlaut
|
marook/tagfs
|
fa541ec8cc455c02825902a8e325f91e49ff3053
|
added assertExists
|
diff --git a/bin/e2eAssertSandbox.sh b/bin/e2eAssertSandbox.sh
index af5bd08..6e3bc87 100755
--- a/bin/e2eAssertSandbox.sh
+++ b/bin/e2eAssertSandbox.sh
@@ -1,35 +1,48 @@
#!/bin/bash
set -e
ASSERT_BIN=$1
fail() {
echo "TEST FAILED: $1" >&2
exit 1
}
+assertExists(){
+ P=$1
+
+ if [ ! -e "$P" ]
+ then
+ fail "Expected path to exist: $P"
+ fi
+}
+
assertLink(){
- PATH=$1
+ P=$1
+
+ assertExists "$P"
- if [ ! -L "$PATH" ]
+ if [ ! -L "$P" ]
then
- fail "Expected path to be link: $PATH"
+ fail "Expected path to be link: $P"
fi
}
assertDir(){
- PATH=$1
+ P=$1
+
+ assertExists "$P"
- if [ ! -d "$PATH" ]
+ if [ ! -d "$P" ]
then
- fail "Expected path to be a directory: $PATH"
+ fail "Expected path to be a directory: $P"
fi
}
assertEqualContent(){
cmp "$1" "$2" > /dev/null || fail "File content is not equal: $1 and $2 ($DIFF)"
}
cd `dirname "$ASSERT_BIN"`
. $ASSERT_BIN > "$ASSERT_BIN.log"
|
marook/tagfs
|
bad26a27dcb0b639da01c71e3b02c04e09bd6a3c
|
e2e assert output is now logged to file
|
diff --git a/bin/e2eAssertSandbox.sh b/bin/e2eAssertSandbox.sh
index bd92c0a..af5bd08 100755
--- a/bin/e2eAssertSandbox.sh
+++ b/bin/e2eAssertSandbox.sh
@@ -1,26 +1,35 @@
#!/bin/bash
set -e
ASSERT_BIN=$1
fail() {
echo "TEST FAILED: $1" >&2
exit 1
}
assertLink(){
PATH=$1
if [ ! -L "$PATH" ]
then
fail "Expected path to be link: $PATH"
fi
}
+assertDir(){
+ PATH=$1
+
+ if [ ! -d "$PATH" ]
+ then
+ fail "Expected path to be a directory: $PATH"
+ fi
+}
+
assertEqualContent(){
cmp "$1" "$2" > /dev/null || fail "File content is not equal: $1 and $2 ($DIFF)"
}
cd `dirname "$ASSERT_BIN"`
-. $ASSERT_BIN
+. $ASSERT_BIN > "$ASSERT_BIN.log"
diff --git a/test/e2e/.gitignore b/test/e2e/.gitignore
new file mode 100644
index 0000000..af16581
--- /dev/null
+++ b/test/e2e/.gitignore
@@ -0,0 +1 @@
+assert.log
|
marook/tagfs
|
46bfd6400f09e6af64c6bc361286b7dbbb796248
|
e2e test no longer tries to execute files
|
diff --git a/setup.py b/setup.py
index a66dddd..20609bd 100644
--- a/setup.py
+++ b/setup.py
@@ -1,290 +1,295 @@
#!/usr/bin/env python
#
# Copyright 2009 Peter Prohaska
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from distutils.core import setup, Command
import sys
import os
from os.path import (
basename,
dirname,
abspath,
splitext,
join as pjoin
)
from glob import glob
from unittest import TestLoader, TextTestRunner
import re
import datetime
from subprocess import call
projectdir = dirname(abspath(__file__))
reportdir = pjoin(projectdir, 'reports')
srcdir = pjoin(projectdir, 'src')
bindir = pjoin(srcdir, 'bin')
moddir = pjoin(srcdir, 'modules')
testdir = pjoin(srcdir, 'test')
endToEndTestDir = pjoin(projectdir, 'test', 'e2e')
testdatadir = pjoin(projectdir, 'etc', 'test', 'events')
testmntdir = pjoin(projectdir, 'mnt')
assert os.path.isdir(srcdir)
assert os.path.isdir(bindir)
assert os.path.isdir(moddir)
assert os.path.isdir(testdir)
assert os.path.isdir(testdatadir)
class Report(object):
def __init__(self):
self.reportDateTime = datetime.datetime.utcnow()
self.reportDir = os.path.join(reportdir, self.reportDateTime.strftime('%Y-%m-%d_%H_%M_%S'))
# fails when dir already exists which is nice
os.makedirs(self.reportDir)
@property
def coverageReportFileName(self):
return os.path.join(self.reportDir, 'coverage.txt')
@property
def unitTestReportFileName(self):
return os.path.join(self.reportDir, 'tests.txt')
def sourceFiles():
yield os.path.join(bindir, 'tagfs')
sourceFilePattern = re.compile('^.*[.]py$')
for root, dirs, files in os.walk(moddir):
for f in files:
if(not sourceFilePattern.match(f)):
continue
if(f.startswith('.#')):
continue
yield os.path.join(root, f)
def fullSplit(p):
head, tail = os.path.split(p)
if(len(head) > 0):
for n in fullSplit(head):
yield n
yield tail
def testModules():
testFilePattern = re.compile('^(test.*)[.]py$', re.IGNORECASE)
for root, dirs, files in os.walk(testdir):
for f in files:
m = testFilePattern.match(f)
if(not m):
continue
relDir = os.path.relpath(root, testdir)
yield '.'.join([n for n in fullSplit(relDir)] + [m.group(1), ])
def printFile(fileName):
if(not os.path.exists(fileName)):
# TODO maybe we should not silently return?
return
with open(fileName, 'r') as f:
for line in f:
sys.stdout.write(line)
class TestFailException(Exception):
'''Indicates that at lease one of the unit tests has failed
'''
pass
class test(Command):
description = 'run tests'
user_options = []
def initialize_options(self):
self._cwd = os.getcwd()
self._verbosity = 2
def finalize_options(self): pass
def run(self):
report = Report()
tests = [m for m in testModules()]
print "..using:"
print " moddir:", moddir
print " testdir:", testdir
print " testdatadir:", testdatadir
print " testmntdir:", testmntdir
print " tests:", tests
print " sys.path:", sys.path
print
# insert project lookup paths at index 0 to make sure they are used
# over global libraries
sys.path.insert(0, moddir)
sys.path.insert(0, testdir)
# TODO try to import all test cases here. the TestLoader is throwing
# very confusing errors when imports can't be resolved.
# configure logging
# TODO not sure how to enable this... it's a bit complicate to enable
# logging only for 'make mt' and disable it then for
# 'python setup.py test'. 'python setup.py test' is such a gabber...
#if 'DEBUG' in os.environ:
# from tagfs import log_config
# log_config.setUpLogging()
if 'DEBUG' in os.environ:
import logging
logging.basicConfig(level = logging.DEBUG)
suite = TestLoader().loadTestsFromNames(tests)
try:
with open(report.unitTestReportFileName, 'w') as testResultsFile:
r = TextTestRunner(stream = testResultsFile, verbosity = self._verbosity)
def runTests():
result = r.run(suite)
if(not result.wasSuccessful()):
raise TestFailException()
try:
import coverage
c = coverage.coverage()
c.start()
runTests()
c.stop()
with open(report.coverageReportFileName, 'w') as reportFile:
c.report([f for f in sourceFiles()], file = reportFile)
except ImportError:
# TODO ImportErrors from runTests() may look like coverage is missing
print ''
print 'coverage module not found.'
print 'To view source coverage stats install http://nedbatchelder.com/code/coverage/'
print ''
runTests()
finally:
# TODO use two streams instead of printing files after writing
printFile(report.unitTestReportFileName)
printFile(report.coverageReportFileName)
class EndToEndTestFailure(Exception):
def __init__(self, testPath):
super(EndToEndTestFailure, self).__init__('end-to-end test failed: %s' % testPath)
class EndToEndTests(Command):
description = 'execute the end-to-end tests'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def runTest(self, testPath):
if(not call(['bin/runEndToEndTest.sh', testPath]) is 0):
raise EndToEndTestFailure(testPath)
def run(self):
for endToEndDirName in os.listdir(endToEndTestDir):
- self.runTest(os.path.join(endToEndTestDir, endToEndDirName))
+ testPath = os.path.join(endToEndTestDir, endToEndDirName)
+
+ if(not os.path.isdir(testPath)):
+ continue
+
+ self.runTest(testPath)
# Overrides default clean (which cleans from build runs)
# This clean should probably be hooked into that somehow.
class clean_pyc(Command):
description = 'remove *.pyc files from source directory'
user_options = []
def initialize_options(self):
self._delete = []
for cwd, dirs, files in os.walk(projectdir):
self._delete.extend(
pjoin(cwd, f) for f in files if f.endswith('.pyc')
)
def finalize_options(self):
pass
def run(self):
for f in self._delete:
try:
os.unlink(f)
except OSError, e:
print "Strange '%s': %s" % (f, e)
# Could be a directory.
# Can we detect file in use errors or are they OSErrors
# as well?
# Shall we catch all?
setup(
cmdclass = {
'test': test,
'clean_pyc': clean_pyc,
'e2e_test': EndToEndTests,
},
name = 'tagfs',
version = '0.1',
url = 'http://wiki.github.com/marook/tagfs',
description = '',
long_description = '',
author = 'Markus Pielmeier',
author_email = '[email protected]',
license = 'GPLv3',
download_url = 'http://github.com/marook/tagfs/downloads/tagfs_0.1-src.tar.bz2',
platforms = 'Linux',
requires = [],
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: System :: Filesystems'
],
data_files = [
(pjoin('share', 'doc', 'tagfs'), ['AUTHORS', 'COPYING', 'README'])
],
# TODO maybe we should include src/bin/*?
scripts = [pjoin(bindir, 'tagfs')],
packages = ['tagfs'],
package_dir = {'': moddir},
)
|
marook/tagfs
|
28c24e5f2c7fd6b4e6c471520087a03c79d33909
|
removed debugging code
|
diff --git a/src/modules/tagfs/item_access.py b/src/modules/tagfs/item_access.py
index ad76bc5..af2a9e3 100644
--- a/src/modules/tagfs/item_access.py
+++ b/src/modules/tagfs/item_access.py
@@ -1,307 +1,303 @@
#
# Copyright 2009 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import time
import traceback
from cache import cache
import sysIO
class Tag(object):
def __init__(self, value, context = None):
if context == None:
self.context = None
else:
self.context = context.strip()
self.value = value.strip()
if not self.context == None and len(self.context) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
if len(self.value) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
def __hash__(self):
return (self.context, self.value).__hash__()
def __eq__(self, other):
return self.value == other.value and self.context == other.context
def __repr__(self):
return '<Tag %s: %s>' % (self.context, self.value)
def parseTagsFromFile(system, tagFileName):
"""Parses the tags from the specified file.
@return: The parsed values are returned as a set containing Tag objects.
@see: Tag
"""
tags = set()
with system.open(tagFileName, 'r') as tagFile:
for rawTag in tagFile:
rawTag = rawTag.strip()
try:
if len(rawTag) == 0:
continue
tagTuple = rawTag.split(':', 1)
if len(tagTuple) == 1:
tagContext = None
tagValue = tagTuple[0]
else:
tagContext = tagTuple[0]
tagValue = tagTuple[1]
tag = Tag(tagValue, context = tagContext)
tags.add(tag)
except:
logging.warning('Skipping tagging \'%s\' from file \'%s\' as it can\'t be parsed\n%s.' % (rawTag, tagFileName, traceback.format_exc()))
return tags
class Item(object):
def __init__(self, name, system, itemAccess, freebaseQueryParser, freebaseAdapter, parseTagsFromFile = parseTagsFromFile):
self.name = name
self.system = system
self.itemAccess = itemAccess
self.freebaseQueryParser = freebaseQueryParser
self.freebaseAdapter = freebaseAdapter
self.parseTagsFromFile = parseTagsFromFile
# TODO register at file system to receive tag file change events.
@property
@cache
def itemDirectory(self):
return os.path.join(self.itemAccess.dataDirectory, self.name)
@property
@cache
def _tagFileName(self):
"""Returns the name of the tag file for this item.
"""
return os.path.join(self.itemDirectory, self.itemAccess.tagFileName)
@property
@cache
def tagFileExists(self):
return self.system.pathExists(self._tagFileName)
def __parseTags(self):
tagFileName = self._tagFileName
for rawTag in self.parseTagsFromFile(self.system, tagFileName):
if(rawTag.context == '_freebase'):
query = self.freebaseQueryParser.parse(rawTag.value)
for context, values in self.freebaseAdapter.execute(query).iteritems():
for value in values:
yield Tag(value, context)
else:
yield rawTag
@property
@cache
def tagsCreationTime(self):
if not self.tagFileExists:
return None
return os.path.getctime(self._tagFileName)
@property
@cache
def tagsModificationTime(self):
"""Returns the last time when the tags have been modified.
"""
if not self.tagFileExists:
return None
return os.path.getmtime(self._tagFileName)
@property
@cache
def tags(self):
"""Returns the tags as a list for this item.
"""
if not self.tagFileExists:
return None
return list(self.__parseTags())
@property
def values(self):
for t in self.tags:
yield t.value
def getTagsByContext(self, context):
for t in self.tags:
if context != t.context:
continue
yield t
def isTaggedWithContextValue(self, context, value):
for t in self.getTagsByContext(context):
if value == t.value:
return True
return False
def isTaggedWithContext(self, context):
# TODO don't create whole list... just check wheather list is empty
return (len([c for c in self.getTagsByContext(context)]) > 0)
def isTaggedWithValue(self, value):
for v in self.values:
if value == v:
return True
return False
@property
def tagged(self):
return self.tagFileExists
def __repr__(self):
return '<Item %s, %s>' % (self.name, self.tags)
class ItemAccess(object):
"""This is the access point to the Items.
"""
def __init__(self, system, dataDirectory, tagFileName, freebaseQueryParser, freebaseAdapter):
self.system = system
self.dataDirectory = dataDirectory
self.tagFileName = tagFileName
self.freebaseQueryParser = freebaseQueryParser
self.freebaseAdapter = freebaseAdapter
self.parseTime = 0
def __parseItems(self):
items = {}
logging.debug('Start parsing items from dir: %s', self.dataDirectory)
for itemName in os.listdir(self.dataDirectory):
if itemName == '.tagfs':
# skip directory with configuration
continue
try:
item = Item(itemName, self.system, self, self.freebaseQueryParser, self.freebaseAdapter)
items[itemName] = item
except IOError, (error, strerror):
logging.error('Can \'t read tags for item %s: %s',
itemName,
strerror)
logging.debug('Found %s items', len(items))
self.parseTime = time.time()
return items
@property
@cache
def items(self):
- items = self.__parseItems()
-
- logging.info('items: %s', items) # TODO remove me
-
- return items
+ return self.__parseItems()
@property
@cache
def tags(self):
tags = set()
for item in self.items.itervalues():
if not item.tagged:
continue
tags = tags | item.tags
return tags
@property
@cache
def taggedItems(self):
return set([item for item in self.items.itervalues() if item.tagged])
@property
@cache
def untaggedItems(self):
return set([item for item in self.items.itervalues() if not item.tagged])
def getItemDirectory(self, item):
return os.path.join(self.dataDirectory, item)
def contextTags(self, context):
contextTags = set()
for tag in self.tags:
if tag.context == context:
contextTags.add(tag)
return contextTags
@property
@cache
def contexts(self):
contexts = set()
for tag in self.tags:
if tag.context == None:
continue
contexts.add(tag.context)
return contexts
@property
@cache
def values(self):
values = set()
for tag in self.tags:
values.add(tag.value)
return values
def __str__(self):
return '[' + ', '.join([field + ': ' + str(self.__dict__[field]) for field in ['dataDirectory', 'tagFileName']]) + ']'
|
marook/tagfs
|
4de870bd9eb1a0796e85bcaf3bb65b3555440b80
|
extended freebase support in README
|
diff --git a/README b/README
index dd97abe..54c4de3 100644
--- a/README
+++ b/README
@@ -1,183 +1,190 @@
tagfs - tag file system
1) Introduction
2) Requirements
3) Installation
4) Tagging Files
5) Usage
6) Configuration
6.1) Options
6.1.1) tagFileName
6.1.2) enableValueFilters
6.1.3) enableRootItemLinks
7) Freebase Integration
8) Further Reading
9) Contact
---------------------------------------------------------------------
Introduction
tagfs is used to organize your files using tags.
This document contains basic usage instructions for users. To develop or debug
tagfs see the README.dev file.
---------------------------------------------------------------------
Requirements
* python 2.5, 2.6, 2.7
* Linux kernel with fuse enabled
* python-fuse installed
---------------------------------------------------------------------
Installation
To install tagfs into your home directory type the following:
$ python setup.py test e2e_test install --home ~/.local
If you haven't already extended your local python path then add the following
to your environment configuration script. For example to your ~/.bashrc:
$ export PYTHONPATH=~/.local/lib/python:$PYTHONPATH
You may also need to add ~/.local/bin to your PATH environment variable:
$ export PATH=~/.local/bin:$PATH
---------------------------------------------------------------------
Tagging Files
Before you can filter anything using tagfs you need to tag your items. An item
is a directory which contains a file called .tag. All items must be below one
directory.
Let's create a simple item structure.
First we create the root directory for all items:
$ mkdir items
Then we create our first item:
$ mkdir items/Ted
We tag the 'Ted' item as movie:
$ echo movie >> items/Ted/.tag
We also tag 'Ted' as genre comedy:
$ echo 'genre: comedy' >> items/Ted/.tag
Then we add a second item:
$ mkdir items/banana
$ echo fruit >> items/banana/.tag
$ echo 'genre: delicious' >> items/banana/.tag
Modifying .tag files using echo, grep, sed may be a little hard sometimes.
There are some convenience scripts available through the tagfs-utils project.
See https://github.com/marook/tagfs-utils for details.
---------------------------------------------------------------------
Usage
After installation tagfs can be started the following way.
Mount a tagged directory:
$ tagfs -i /path/to/my/items/directory /path/to/my/mount/point
Unmount a tagged directory:
$ fusermount -u /path/to/my/mount/point
Right now tagfs reads the taggings only when it's getting mounted. So if you
modify the tags after mounting you will not see any changes in the tagfs file
system.
In general tagfs will try to reduce the number of filter directories below the
virtual file system. That's why you may not see some filters which would not
reduce the number of selected items.
---------------------------------------------------------------------
Configuration
tagfs can be configured through configuration files. Configuration files are
searched in different locations by tagfs. The following locations are used.
Locations with higher priority come first:
- <items directory>/.tagfs/tagfs.conf
- ~/.tagfs/tagfs.conf
- /etc/tagfs/tagfs.conf
Right now the following configuration options are supported.
---------------------------------------------------------------------
Configuration - Options - tagFileName
Through this option the name of the parsed tag files can be specified. The
default value is '.tag'.
Example:
[global]
tagFileName = ABOUT
---------------------------------------------------------------------
Configuration - Options - enableValueFilters
You can enable or disable value filters. If you enable value filters you will
see filter directories for each tag value. For value filters the tag's
context can be anyone. The default value is 'false'.
Example:
[global]
enableValueFilters = true
---------------------------------------------------------------------
Configuration - Options - enableRootItemLinks
To show links to all items in the tagfs '/' directory enable this option. The
default value is 'false'.
Example:
[global]
enableRootItemLinks = true
---------------------------------------------------------------------
Freebase Integration
-Freebase is an open graph of people, places and things. tagfs allows you to
-extend your own taggings with data directly from the freebase graph.
+Freebase is an open graph of people, places and things. See
+http://www.freebase.com/ for details. tagfs allows you to extend your own
+taggings with data directly from the freebase graph.
WARNING! Freebase support is currently experimental. It is very likely that the
freebase syntax within the .tag files will change in future releases of tagfs.
In order to use freebase you need to install the freebase-python bindings. They
are available via https://code.google.com/p/freebase-python/
To extend an item's taggings with freebase data you have to add a freebase query
to the item's .tag file. Here's an example:
-_freebase: {'id': '/m/0clpml', 'type': '/fictional_universe/fictional_character', 'name': null, 'occupation': null}
+_freebase: {"id": "/m/0clpml", "type": "/fictional_universe/fictional_character", "name": null, "occupation": null}
+
+tagfs uses the freebase MQL query format which is described below the following
+link http://wiki.freebase.com/wiki/MQL
+
+The query properties with null values are added as context/tag pairs to the
+.tag file's item.
---------------------------------------------------------------------
Further Reading
Using a file system for my bank account (Markus Pielmeier)
http://pielmeier.blogspot.com/2010/08/using-file-system-for-my-bank-account.html
---------------------------------------------------------------------
Contact
* homepage: http://wiki.github.com/marook/tagfs
* user group: http://groups.google.com/group/tagfs
* author: Markus Pielmeier <[email protected]>
|
marook/tagfs
|
d138e05427be96850b8ce9cccef8694162d92d24
|
made freebase dependency optional
|
diff --git a/src/modules/tagfs/freebase_support.py b/src/modules/tagfs/freebase_support.py
index 2933855..0b536d3 100644
--- a/src/modules/tagfs/freebase_support.py
+++ b/src/modules/tagfs/freebase_support.py
@@ -1,71 +1,84 @@
#
# Copyright 2012 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import json
-import freebase
+import logging
-def createFreebaseAdapter(object):
+def createFreebaseAdapter():
+ # freebase is an optional dependency. tagfs should execute even if it's not
+ # available.
+ try:
+ import freebase
- import freebase
+ logging.info('freebase support enabled')
- return FreebaseAdapter()
+ return FreebaseAdapter()
+ except ImportError:
+ logging.warn('freebase support disabled')
+
+ return FreebaseAdapterStub()
+class FreebaseAdapterStub(object):
+
+ def execute(self, *args, **kwargs):
+ return {}
+
class FreebaseAdapter(object):
def execute(self, query):
fbResult = freebase.mqlread(query.freebaseQuery)
result = {}
for key in query.selectedKeys:
result[key] = fbResult[key]
return result
class Query(object):
def __init__(self, queryObject):
self.queryObject = queryObject
@property
def freebaseQuery(self):
q = {}
for key, value in self.queryObject.iteritems():
if(value is None):
q[key] = []
else:
q[key] = value
return q
@property
def queryString(self):
# TODO this func is only used in tests => remove
return json.dumps(self.freebaseQuery, separators = (',', ':'))
@property
def selectedKeys(self):
for key, value in self.queryObject.iteritems():
if(value is None):
yield key
class QueryParser(object):
def parse(self, queryString):
return Query(json.loads(queryString))
diff --git a/src/modules/tagfs/main.py b/src/modules/tagfs/main.py
index ef162db..96bc84b 100644
--- a/src/modules/tagfs/main.py
+++ b/src/modules/tagfs/main.py
@@ -1,183 +1,183 @@
#!/usr/bin/env python
#
# Copyright 2009, 2010 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
#
# = tag fs =
# == glossary ==
# * item: An item is a directory in the item container directory. Items can be
# tagged using a tag file.
# * tag: A tag is a text string which can be assigned to an item. Tags can
# consist of any character except newlines.
import os
import stat
import errno
import exceptions
import time
import functools
import logging
import fuse
if not hasattr(fuse, '__version__'):
raise RuntimeError, \
"your fuse-py doesn't know of fuse.__version__, probably it's too old."
fuse.fuse_python_api = (0, 2)
from view import View
from cache import cache
from item_access import ItemAccess
from config import parseConfig
from log import logException
import sysIO
import freebase_support
class TagFS(fuse.Fuse):
def __init__(self, initwd, *args, **kw):
fuse.Fuse.__init__(self, *args, **kw)
self._initwd = initwd
self._itemsRoot = None
# TODO change command line arguments structure
# goal: tagfs <items dir> <mount dir>
self.parser.add_option('-i',
'--items-dir',
dest = 'itemsDir',
help = 'items directory',
metavar = 'dir')
self.parser.add_option('-t',
'--tag-file',
dest = 'tagFileName',
help = 'tag file name',
metavar = 'file',
default = None)
self.parser.add_option('--value-filter',
action = 'store_true',
dest = 'enableValueFilters',
help = 'Displays value filter directories on toplevel instead of only context entries',
default = None)
self.parser.add_option('--root-items',
action = 'store_true',
dest = 'enableRootItemLinks',
help = 'Display item links in tagfs root directory.',
default = None)
def getItemAccess(self):
# Maybe we should move the parser run from main here.
# Or we should at least check if it was run once...
opts, args = self.cmdline
# Maybe we should add expand user? Maybe even vars???
assert opts.itemsDir != None and opts.itemsDir != ''
itemsRoot = os.path.normpath(
os.path.join(self._initwd, opts.itemsDir))
# TODO rel https://github.com/marook/tagfs/issues#issue/2
# Ensure that mount-point and items dir are disjoined.
# Something along
# assert not os.path.normpath(itemsDir).startswith(itemsRoot)
# try/except here?
try:
- return ItemAccess(sysIO.createSystem(), itemsRoot, self.config.tagFileName, freebase_support.QueryParser(), freebase_support.FreebaseAdapter())
+ return ItemAccess(sysIO.createSystem(), itemsRoot, self.config.tagFileName, freebase_support.QueryParser(), freebase_support.createFreebaseAdapter())
except OSError, e:
logging.error("Can't create item access from items directory %s. Reason: %s",
itemsRoot, str(e.strerror))
raise
@property
@cache
def config(self):
opts, args = self.cmdline
c = parseConfig(os.path.normpath(os.path.join(self._initwd, opts.itemsDir)))
if opts.tagFileName:
c.tagFileName = opts.tagFileName
if opts.enableValueFilters:
c.enableValueFilters = opts.enableValueFilters
if opts.enableRootItemLinks:
c.enableRootItemLinks = opts.enableRootItemLinks
logging.debug('Using configuration %s' % c)
return c
@property
@cache
def view(self):
itemAccess = self.getItemAccess()
return View(itemAccess, self.config)
@logException
def getattr(self, path):
return self.view.getattr(path)
@logException
def readdir(self, path, offset):
return self.view.readdir(path, offset)
@logException
def readlink(self, path):
return self.view.readlink(path)
@logException
def open(self, path, flags):
return self.view.open(path, flags)
@logException
def read(self, path, size, offset):
return self.view.read(path, size, offset)
@logException
def write(self, path, data, pos):
return self.view.write(path, data, pos)
@logException
def symlink(self, path, linkPath):
return self.view.symlink(path, linkPath)
def main():
fs = TagFS(os.getcwd(),
version = "%prog " + fuse.__version__,
dash_s_do = 'setsingle')
fs.parse(errex = 1)
opts, args = fs.cmdline
if opts.itemsDir == None:
fs.parser.print_help()
# items dir should probably be an arg, not an option.
print "Error: Missing items directory option."
# Quickfix rel https://github.com/marook/tagfs/issues/#issue/3
# FIXME: since we run main via sys.exit(main()), this should
# probably be handled via some return code.
import sys
sys.exit()
return fs.main()
if __name__ == '__main__':
import sys
sys.exit(main())
|
marook/tagfs
|
897b112be6a43aaf087243d181e64726972b0484
|
fixed some encoding issues
|
diff --git a/src/modules/tagfs/item_access.py b/src/modules/tagfs/item_access.py
index 451ae32..ad76bc5 100644
--- a/src/modules/tagfs/item_access.py
+++ b/src/modules/tagfs/item_access.py
@@ -1,307 +1,307 @@
#
# Copyright 2009 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import time
import traceback
from cache import cache
import sysIO
class Tag(object):
def __init__(self, value, context = None):
if context == None:
self.context = None
else:
self.context = context.strip()
self.value = value.strip()
if not self.context == None and len(self.context) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
if len(self.value) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
def __hash__(self):
return (self.context, self.value).__hash__()
def __eq__(self, other):
return self.value == other.value and self.context == other.context
def __repr__(self):
return '<Tag %s: %s>' % (self.context, self.value)
def parseTagsFromFile(system, tagFileName):
"""Parses the tags from the specified file.
@return: The parsed values are returned as a set containing Tag objects.
@see: Tag
"""
tags = set()
with system.open(tagFileName, 'r') as tagFile:
for rawTag in tagFile:
rawTag = rawTag.strip()
try:
if len(rawTag) == 0:
continue
tagTuple = rawTag.split(':', 1)
if len(tagTuple) == 1:
tagContext = None
tagValue = tagTuple[0]
else:
tagContext = tagTuple[0]
tagValue = tagTuple[1]
tag = Tag(tagValue, context = tagContext)
tags.add(tag)
except:
logging.warning('Skipping tagging \'%s\' from file \'%s\' as it can\'t be parsed\n%s.' % (rawTag, tagFileName, traceback.format_exc()))
return tags
class Item(object):
def __init__(self, name, system, itemAccess, freebaseQueryParser, freebaseAdapter, parseTagsFromFile = parseTagsFromFile):
self.name = name
self.system = system
self.itemAccess = itemAccess
self.freebaseQueryParser = freebaseQueryParser
self.freebaseAdapter = freebaseAdapter
self.parseTagsFromFile = parseTagsFromFile
# TODO register at file system to receive tag file change events.
@property
@cache
def itemDirectory(self):
return os.path.join(self.itemAccess.dataDirectory, self.name)
@property
@cache
def _tagFileName(self):
"""Returns the name of the tag file for this item.
"""
return os.path.join(self.itemDirectory, self.itemAccess.tagFileName)
@property
@cache
def tagFileExists(self):
return self.system.pathExists(self._tagFileName)
def __parseTags(self):
tagFileName = self._tagFileName
for rawTag in self.parseTagsFromFile(self.system, tagFileName):
if(rawTag.context == '_freebase'):
query = self.freebaseQueryParser.parse(rawTag.value)
for context, values in self.freebaseAdapter.execute(query).iteritems():
for value in values:
yield Tag(value, context)
else:
yield rawTag
@property
@cache
def tagsCreationTime(self):
if not self.tagFileExists:
return None
return os.path.getctime(self._tagFileName)
@property
@cache
def tagsModificationTime(self):
"""Returns the last time when the tags have been modified.
"""
if not self.tagFileExists:
return None
return os.path.getmtime(self._tagFileName)
@property
@cache
def tags(self):
"""Returns the tags as a list for this item.
"""
if not self.tagFileExists:
return None
return list(self.__parseTags())
@property
def values(self):
for t in self.tags:
yield t.value
def getTagsByContext(self, context):
for t in self.tags:
if context != t.context:
continue
yield t
def isTaggedWithContextValue(self, context, value):
for t in self.getTagsByContext(context):
if value == t.value:
return True
return False
def isTaggedWithContext(self, context):
# TODO don't create whole list... just check wheather list is empty
return (len([c for c in self.getTagsByContext(context)]) > 0)
def isTaggedWithValue(self, value):
for v in self.values:
if value == v:
return True
return False
@property
def tagged(self):
return self.tagFileExists
def __repr__(self):
return '<Item %s, %s>' % (self.name, self.tags)
class ItemAccess(object):
"""This is the access point to the Items.
"""
def __init__(self, system, dataDirectory, tagFileName, freebaseQueryParser, freebaseAdapter):
self.system = system
self.dataDirectory = dataDirectory
self.tagFileName = tagFileName
self.freebaseQueryParser = freebaseQueryParser
self.freebaseAdapter = freebaseAdapter
self.parseTime = 0
def __parseItems(self):
items = {}
logging.debug('Start parsing items from dir: %s', self.dataDirectory)
for itemName in os.listdir(self.dataDirectory):
if itemName == '.tagfs':
# skip directory with configuration
continue
try:
item = Item(itemName, self.system, self, self.freebaseQueryParser, self.freebaseAdapter)
items[itemName] = item
except IOError, (error, strerror):
logging.error('Can \'t read tags for item %s: %s',
itemName,
strerror)
logging.debug('Found %s items', len(items))
self.parseTime = time.time()
return items
@property
@cache
def items(self):
items = self.__parseItems()
logging.info('items: %s', items) # TODO remove me
return items
@property
@cache
def tags(self):
tags = set()
for item in self.items.itervalues():
if not item.tagged:
continue
tags = tags | item.tags
return tags
@property
@cache
def taggedItems(self):
return set([item for item in self.items.itervalues() if item.tagged])
@property
@cache
def untaggedItems(self):
return set([item for item in self.items.itervalues() if not item.tagged])
def getItemDirectory(self, item):
return os.path.join(self.dataDirectory, item)
def contextTags(self, context):
contextTags = set()
for tag in self.tags:
if tag.context == context:
contextTags.add(tag)
return contextTags
@property
@cache
def contexts(self):
contexts = set()
-
+
for tag in self.tags:
if tag.context == None:
continue
-
+
contexts.add(tag.context)
return contexts
@property
@cache
def values(self):
values = set()
for tag in self.tags:
values.add(tag.value)
return values
def __str__(self):
return '[' + ', '.join([field + ': ' + str(self.__dict__[field]) for field in ['dataDirectory', 'tagFileName']]) + ']'
diff --git a/src/modules/tagfs/node_filter.py b/src/modules/tagfs/node_filter.py
index 894f441..0eb500a 100644
--- a/src/modules/tagfs/node_filter.py
+++ b/src/modules/tagfs/node_filter.py
@@ -1,98 +1,98 @@
#
# Copyright 2011 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from cache import cache
from node import Stat, ItemLinkNode, DirectoryNode
from node_export import ExportDirectoryNode
class FilterDirectoryNode(DirectoryNode):
def __init__(self, itemAccess, config):
self.itemAccess = itemAccess
self.config = config
@property
def attr(self):
s = super(FilterDirectoryNode, self).attr
# TODO why nlink == 2?
s.st_nlink = 2
# TODO write test case which tests st_mtime == itemAccess.parseTime
s.st_mtime = self.itemAccess.parseTime
s.st_ctime = s.st_mtime
s.st_atime = s.st_mtime
return s
@property
def contexts(self):
c = set()
for item in self.items:
for t in item.tags:
context = t.context
if context is None:
continue
- c.add(t.context)
+ c.add(context)
return c
@property
def _enableItemLinks(self):
return True
@property
def _entries(self):
# the import is not global because we want to prevent a cyclic
# dependency (ugly but works)
from node_filter_context import ContextValueListDirectoryNode
from node_filter_value import ValueFilterDirectoryNode
from node_filter_any_context import AnyContextValueListDirectoryNode
yield ExportDirectoryNode(self.itemAccess, self)
yield AnyContextValueListDirectoryNode(self.itemAccess, self.config, self)
if(self.config.enableValueFilters):
for value in self.itemAccess.values:
yield ValueFilterDirectoryNode(self.itemAccess, self.config, self, value)
for context in self.contexts:
yield ContextValueListDirectoryNode(self.itemAccess, self.config, self, context)
if(self._enableItemLinks):
for item in self.items:
yield ItemLinkNode(item)
def addsValue(self, parentItems):
itemsLen = len(list(self.items))
if(itemsLen == 0):
return False
# TODO we should not compare the lengths but whether the child and
# parent items are different
parentItemsLen = len(list(parentItems))
return itemsLen != parentItemsLen
def _addsValue(self, child):
return child.addsValue(self.items)
diff --git a/src/modules/tagfs/view.py b/src/modules/tagfs/view.py
index bb89462..e988a54 100644
--- a/src/modules/tagfs/view.py
+++ b/src/modules/tagfs/view.py
@@ -1,174 +1,176 @@
#!/usr/bin/env python
#
# Copyright 2009, 2010 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
import errno
import logging
import os
from log import logCall, logException
from cache import cache
from transient_dict import TransientDict
from node_root import RootDirectoryNode
from fuse import Direntry
class View(object):
"""Abstraction layer from fuse API.
This class is an abstraction layer from the fuse API. This should ease
writing test cases for the file system.
"""
DEFAULT_NODES = {
# directory icons for rox filer
'.DirIcon': None,
# launch script for rox filer application directories
'AppRun': None
}
def __init__(self, itemAccess, config):
self.itemAccess = itemAccess
self.config = config
self._entryCache = TransientDict(100)
@property
@cache
def rootNode(self):
return RootDirectoryNode(self.itemAccess, self.config)
def getNode(self, path):
if path in self._entryCache:
# simple path name based caching is implemented here
logging.debug('tagfs _entryCache hit')
return self._entryCache[path]
# ps contains the path segments
ps = [x for x in os.path.normpath(path).split(os.sep) if x != '']
psLen = len(ps)
if psLen > 0:
lastSegment = ps[psLen - 1]
if lastSegment in View.DEFAULT_NODES:
logging.debug('Using default node for path ' + path)
return View.DEFAULT_NODES[lastSegment]
e = self.rootNode
for pe in path.split('/')[1:]:
if pe == '':
continue
entries = e.entries
if not pe in entries:
# it seems like we are trying to fetch a node for an illegal
# path
return None
e = entries[pe]
logging.debug('tagfs _entryCache miss')
self._entryCache[path] = e
return e
@logCall
def getattr(self, path):
e = self.getNode(path)
if not e:
logging.debug('Try to read attributes from not existing node: ' + path)
return -errno.ENOENT
return e.attr
@logCall
def readdir(self, path, offset):
e = self.getNode(path)
if not e:
logging.warn('Try to read not existing directory: ' + path)
return -errno.ENOENT
# TODO care about offset parameter
- return [Direntry(name) for name in e.entries.iterkeys()]
+ # without the decode/encode operations fuse refuses to show directory
+ # entries which are based on freebase data
+ return [Direntry(name.decode('ascii', 'ignore').encode('ascii')) for name in e.entries.iterkeys()]
@logCall
def readlink(self, path):
n = self.getNode(path)
if not n:
logging.warn('Try to read not existing link from node: ' + path)
return -errno.ENOENT
return n.link
@logCall
def symlink(self, path, linkPath):
linkPathSegs = linkPath.split('/')
n = self.getNode('/'.join(linkPathSegs[0:len(linkPathSegs) - 2]))
if not n:
return -errno.ENOENT
return n.symlink(path, linkPath)
@logCall
def open(self, path, flags):
n = self.getNode(path)
if not n:
logging.warn('Try to open not existing node: ' + path)
return -errno.ENOENT
return n.open(path, flags)
@logCall
def read(self, path, len, offset):
n = self.getNode(path)
if not n:
logging.warn('Try to read from not existing node: ' + path)
return -errno.ENOENT
return n.read(path, len, offset)
@logCall
def write(self, path, data, pos):
n = self.getNode(path)
if not n:
logging.warn('Try to write to not existing node: ' + path)
return -errno.ENOENT
return n.write(path, data, pos)
|
marook/tagfs
|
5b9fbe80dc4634509ac371ed2c55842f14936f81
|
added debug logging / removed some dead code
|
diff --git a/src/modules/tagfs/item_access.py b/src/modules/tagfs/item_access.py
index 109da66..451ae32 100644
--- a/src/modules/tagfs/item_access.py
+++ b/src/modules/tagfs/item_access.py
@@ -1,309 +1,307 @@
#
# Copyright 2009 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import time
import traceback
from cache import cache
import sysIO
class Tag(object):
def __init__(self, value, context = None):
if context == None:
self.context = None
else:
self.context = context.strip()
self.value = value.strip()
if not self.context == None and len(self.context) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
if len(self.value) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
def __hash__(self):
return (self.context, self.value).__hash__()
def __eq__(self, other):
return self.value == other.value and self.context == other.context
def __repr__(self):
return '<Tag %s: %s>' % (self.context, self.value)
def parseTagsFromFile(system, tagFileName):
"""Parses the tags from the specified file.
@return: The parsed values are returned as a set containing Tag objects.
@see: Tag
"""
tags = set()
with system.open(tagFileName, 'r') as tagFile:
for rawTag in tagFile:
rawTag = rawTag.strip()
try:
if len(rawTag) == 0:
continue
tagTuple = rawTag.split(':', 1)
if len(tagTuple) == 1:
tagContext = None
tagValue = tagTuple[0]
else:
tagContext = tagTuple[0]
tagValue = tagTuple[1]
tag = Tag(tagValue, context = tagContext)
tags.add(tag)
except:
logging.warning('Skipping tagging \'%s\' from file \'%s\' as it can\'t be parsed\n%s.' % (rawTag, tagFileName, traceback.format_exc()))
return tags
class Item(object):
def __init__(self, name, system, itemAccess, freebaseQueryParser, freebaseAdapter, parseTagsFromFile = parseTagsFromFile):
self.name = name
self.system = system
self.itemAccess = itemAccess
self.freebaseQueryParser = freebaseQueryParser
self.freebaseAdapter = freebaseAdapter
self.parseTagsFromFile = parseTagsFromFile
# TODO register at file system to receive tag file change events.
@property
@cache
def itemDirectory(self):
return os.path.join(self.itemAccess.dataDirectory, self.name)
@property
@cache
def _tagFileName(self):
"""Returns the name of the tag file for this item.
"""
- itemDirectory = self.itemDirectory
-
- return os.path.join(itemDirectory, self.itemAccess.tagFileName)
+ return os.path.join(self.itemDirectory, self.itemAccess.tagFileName)
@property
+ @cache
def tagFileExists(self):
return self.system.pathExists(self._tagFileName)
def __parseTags(self):
tagFileName = self._tagFileName
for rawTag in self.parseTagsFromFile(self.system, tagFileName):
if(rawTag.context == '_freebase'):
query = self.freebaseQueryParser.parse(rawTag.value)
for context, values in self.freebaseAdapter.execute(query).iteritems():
for value in values:
yield Tag(value, context)
else:
yield rawTag
@property
@cache
def tagsCreationTime(self):
- tagFileName = self._tagFileName
-
if not self.tagFileExists:
return None
return os.path.getctime(self._tagFileName)
@property
@cache
def tagsModificationTime(self):
"""Returns the last time when the tags have been modified.
"""
- tagFileName = self._tagFileName
-
if not self.tagFileExists:
return None
- return os.path.getmtime(tagFileName)
+ return os.path.getmtime(self._tagFileName)
@property
@cache
def tags(self):
"""Returns the tags as a list for this item.
"""
if not self.tagFileExists:
return None
return list(self.__parseTags())
@property
def values(self):
for t in self.tags:
yield t.value
def getTagsByContext(self, context):
for t in self.tags:
if context != t.context:
continue
yield t
def isTaggedWithContextValue(self, context, value):
for t in self.getTagsByContext(context):
if value == t.value:
return True
return False
def isTaggedWithContext(self, context):
# TODO don't create whole list... just check wheather list is empty
return (len([c for c in self.getTagsByContext(context)]) > 0)
def isTaggedWithValue(self, value):
for v in self.values:
if value == v:
return True
return False
@property
- @cache
def tagged(self):
- return os.path.exists(self._tagFileName)
+ return self.tagFileExists
def __repr__(self):
- return '<Item %s>' % self.name
+ return '<Item %s, %s>' % (self.name, self.tags)
class ItemAccess(object):
"""This is the access point to the Items.
"""
def __init__(self, system, dataDirectory, tagFileName, freebaseQueryParser, freebaseAdapter):
self.system = system
self.dataDirectory = dataDirectory
self.tagFileName = tagFileName
self.freebaseQueryParser = freebaseQueryParser
self.freebaseAdapter = freebaseAdapter
self.parseTime = 0
def __parseItems(self):
items = {}
logging.debug('Start parsing items from dir: %s', self.dataDirectory)
for itemName in os.listdir(self.dataDirectory):
if itemName == '.tagfs':
# skip directory with configuration
continue
try:
item = Item(itemName, self.system, self, self.freebaseQueryParser, self.freebaseAdapter)
items[itemName] = item
except IOError, (error, strerror):
logging.error('Can \'t read tags for item %s: %s',
itemName,
strerror)
logging.debug('Found %s items', len(items))
self.parseTime = time.time()
return items
@property
@cache
def items(self):
- return self.__parseItems()
+ items = self.__parseItems()
+
+ logging.info('items: %s', items) # TODO remove me
+
+ return items
@property
@cache
def tags(self):
tags = set()
for item in self.items.itervalues():
if not item.tagged:
continue
tags = tags | item.tags
return tags
@property
@cache
def taggedItems(self):
return set([item for item in self.items.itervalues() if item.tagged])
@property
@cache
def untaggedItems(self):
return set([item for item in self.items.itervalues() if not item.tagged])
def getItemDirectory(self, item):
return os.path.join(self.dataDirectory, item)
def contextTags(self, context):
contextTags = set()
for tag in self.tags:
if tag.context == context:
contextTags.add(tag)
return contextTags
@property
@cache
def contexts(self):
contexts = set()
for tag in self.tags:
if tag.context == None:
continue
contexts.add(tag.context)
return contexts
@property
@cache
def values(self):
values = set()
for tag in self.tags:
values.add(tag.value)
return values
def __str__(self):
return '[' + ', '.join([field + ': ' + str(self.__dict__[field]) for field in ['dataDirectory', 'tagFileName']]) + ']'
|
marook/tagfs
|
502b8a0878bd35ba683db27317fc2fb020da9c93
|
removed unused object members
|
diff --git a/src/modules/tagfs/item_access.py b/src/modules/tagfs/item_access.py
index 1abddb6..109da66 100644
--- a/src/modules/tagfs/item_access.py
+++ b/src/modules/tagfs/item_access.py
@@ -1,313 +1,309 @@
#
# Copyright 2009 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import time
import traceback
from cache import cache
import sysIO
class Tag(object):
def __init__(self, value, context = None):
if context == None:
self.context = None
else:
self.context = context.strip()
self.value = value.strip()
if not self.context == None and len(self.context) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
if len(self.value) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
def __hash__(self):
return (self.context, self.value).__hash__()
def __eq__(self, other):
return self.value == other.value and self.context == other.context
def __repr__(self):
return '<Tag %s: %s>' % (self.context, self.value)
def parseTagsFromFile(system, tagFileName):
"""Parses the tags from the specified file.
@return: The parsed values are returned as a set containing Tag objects.
@see: Tag
"""
tags = set()
with system.open(tagFileName, 'r') as tagFile:
for rawTag in tagFile:
rawTag = rawTag.strip()
try:
if len(rawTag) == 0:
continue
tagTuple = rawTag.split(':', 1)
if len(tagTuple) == 1:
tagContext = None
tagValue = tagTuple[0]
else:
tagContext = tagTuple[0]
tagValue = tagTuple[1]
tag = Tag(tagValue, context = tagContext)
tags.add(tag)
except:
logging.warning('Skipping tagging \'%s\' from file \'%s\' as it can\'t be parsed\n%s.' % (rawTag, tagFileName, traceback.format_exc()))
return tags
class Item(object):
def __init__(self, name, system, itemAccess, freebaseQueryParser, freebaseAdapter, parseTagsFromFile = parseTagsFromFile):
self.name = name
self.system = system
self.itemAccess = itemAccess
self.freebaseQueryParser = freebaseQueryParser
self.freebaseAdapter = freebaseAdapter
self.parseTagsFromFile = parseTagsFromFile
# TODO register at file system to receive tag file change events.
@property
@cache
def itemDirectory(self):
return os.path.join(self.itemAccess.dataDirectory, self.name)
@property
@cache
def _tagFileName(self):
"""Returns the name of the tag file for this item.
"""
itemDirectory = self.itemDirectory
return os.path.join(itemDirectory, self.itemAccess.tagFileName)
@property
def tagFileExists(self):
return self.system.pathExists(self._tagFileName)
def __parseTags(self):
tagFileName = self._tagFileName
for rawTag in self.parseTagsFromFile(self.system, tagFileName):
if(rawTag.context == '_freebase'):
query = self.freebaseQueryParser.parse(rawTag.value)
for context, values in self.freebaseAdapter.execute(query).iteritems():
for value in values:
yield Tag(value, context)
else:
yield rawTag
@property
@cache
def tagsCreationTime(self):
tagFileName = self._tagFileName
if not self.tagFileExists:
return None
return os.path.getctime(self._tagFileName)
@property
@cache
def tagsModificationTime(self):
"""Returns the last time when the tags have been modified.
"""
tagFileName = self._tagFileName
if not self.tagFileExists:
return None
return os.path.getmtime(tagFileName)
@property
@cache
def tags(self):
"""Returns the tags as a list for this item.
"""
if not self.tagFileExists:
return None
return list(self.__parseTags())
@property
def values(self):
for t in self.tags:
yield t.value
def getTagsByContext(self, context):
for t in self.tags:
if context != t.context:
continue
yield t
def isTaggedWithContextValue(self, context, value):
for t in self.getTagsByContext(context):
if value == t.value:
return True
return False
def isTaggedWithContext(self, context):
# TODO don't create whole list... just check wheather list is empty
return (len([c for c in self.getTagsByContext(context)]) > 0)
def isTaggedWithValue(self, value):
for v in self.values:
if value == v:
return True
return False
@property
@cache
def tagged(self):
return os.path.exists(self._tagFileName)
def __repr__(self):
return '<Item %s>' % self.name
class ItemAccess(object):
"""This is the access point to the Items.
"""
def __init__(self, system, dataDirectory, tagFileName, freebaseQueryParser, freebaseAdapter):
self.system = system
self.dataDirectory = dataDirectory
self.tagFileName = tagFileName
self.freebaseQueryParser = freebaseQueryParser
self.freebaseAdapter = freebaseAdapter
- self.__items = None
- self.__tags = None
- self.__taggedItems = None
- self.__untaggedItems = None
self.parseTime = 0
def __parseItems(self):
items = {}
logging.debug('Start parsing items from dir: %s', self.dataDirectory)
for itemName in os.listdir(self.dataDirectory):
if itemName == '.tagfs':
# skip directory with configuration
continue
try:
item = Item(itemName, self.system, self, self.freebaseQueryParser, self.freebaseAdapter)
items[itemName] = item
except IOError, (error, strerror):
logging.error('Can \'t read tags for item %s: %s',
itemName,
strerror)
logging.debug('Found %s items', len(items))
self.parseTime = time.time()
return items
@property
@cache
def items(self):
return self.__parseItems()
@property
@cache
def tags(self):
tags = set()
for item in self.items.itervalues():
if not item.tagged:
continue
tags = tags | item.tags
return tags
@property
@cache
def taggedItems(self):
return set([item for item in self.items.itervalues() if item.tagged])
@property
@cache
def untaggedItems(self):
return set([item for item in self.items.itervalues() if not item.tagged])
def getItemDirectory(self, item):
return os.path.join(self.dataDirectory, item)
def contextTags(self, context):
contextTags = set()
for tag in self.tags:
if tag.context == context:
contextTags.add(tag)
return contextTags
@property
@cache
def contexts(self):
contexts = set()
for tag in self.tags:
if tag.context == None:
continue
contexts.add(tag.context)
return contexts
@property
@cache
def values(self):
values = set()
for tag in self.tags:
values.add(tag.value)
return values
def __str__(self):
return '[' + ', '.join([field + ': ' + str(self.__dict__[field]) for field in ['dataDirectory', 'tagFileName']]) + ']'
|
marook/tagfs
|
012a0556c2f598132256b84f903da9db1ec02a9a
|
implemented some freebase integratoin
|
diff --git a/README b/README
index e2e3434..dd97abe 100644
--- a/README
+++ b/README
@@ -1,180 +1,183 @@
tagfs - tag file system
1) Introduction
2) Requirements
3) Installation
4) Tagging Files
5) Usage
6) Configuration
6.1) Options
6.1.1) tagFileName
6.1.2) enableValueFilters
6.1.3) enableRootItemLinks
7) Freebase Integration
8) Further Reading
9) Contact
---------------------------------------------------------------------
Introduction
tagfs is used to organize your files using tags.
This document contains basic usage instructions for users. To develop or debug
tagfs see the README.dev file.
---------------------------------------------------------------------
Requirements
* python 2.5, 2.6, 2.7
* Linux kernel with fuse enabled
* python-fuse installed
---------------------------------------------------------------------
Installation
To install tagfs into your home directory type the following:
$ python setup.py test e2e_test install --home ~/.local
If you haven't already extended your local python path then add the following
to your environment configuration script. For example to your ~/.bashrc:
$ export PYTHONPATH=~/.local/lib/python:$PYTHONPATH
You may also need to add ~/.local/bin to your PATH environment variable:
$ export PATH=~/.local/bin:$PATH
---------------------------------------------------------------------
Tagging Files
Before you can filter anything using tagfs you need to tag your items. An item
is a directory which contains a file called .tag. All items must be below one
directory.
Let's create a simple item structure.
First we create the root directory for all items:
$ mkdir items
Then we create our first item:
$ mkdir items/Ted
We tag the 'Ted' item as movie:
$ echo movie >> items/Ted/.tag
We also tag 'Ted' as genre comedy:
$ echo 'genre: comedy' >> items/Ted/.tag
Then we add a second item:
$ mkdir items/banana
$ echo fruit >> items/banana/.tag
$ echo 'genre: delicious' >> items/banana/.tag
Modifying .tag files using echo, grep, sed may be a little hard sometimes.
There are some convenience scripts available through the tagfs-utils project.
See https://github.com/marook/tagfs-utils for details.
---------------------------------------------------------------------
Usage
After installation tagfs can be started the following way.
Mount a tagged directory:
$ tagfs -i /path/to/my/items/directory /path/to/my/mount/point
Unmount a tagged directory:
$ fusermount -u /path/to/my/mount/point
Right now tagfs reads the taggings only when it's getting mounted. So if you
modify the tags after mounting you will not see any changes in the tagfs file
system.
In general tagfs will try to reduce the number of filter directories below the
virtual file system. That's why you may not see some filters which would not
reduce the number of selected items.
---------------------------------------------------------------------
Configuration
tagfs can be configured through configuration files. Configuration files are
searched in different locations by tagfs. The following locations are used.
Locations with higher priority come first:
- <items directory>/.tagfs/tagfs.conf
- ~/.tagfs/tagfs.conf
- /etc/tagfs/tagfs.conf
Right now the following configuration options are supported.
---------------------------------------------------------------------
Configuration - Options - tagFileName
Through this option the name of the parsed tag files can be specified. The
default value is '.tag'.
Example:
[global]
tagFileName = ABOUT
---------------------------------------------------------------------
Configuration - Options - enableValueFilters
You can enable or disable value filters. If you enable value filters you will
see filter directories for each tag value. For value filters the tag's
context can be anyone. The default value is 'false'.
Example:
[global]
enableValueFilters = true
---------------------------------------------------------------------
Configuration - Options - enableRootItemLinks
To show links to all items in the tagfs '/' directory enable this option. The
default value is 'false'.
Example:
[global]
enableRootItemLinks = true
---------------------------------------------------------------------
Freebase Integration
Freebase is an open graph of people, places and things. tagfs allows you to
extend your own taggings with data directly from the freebase graph.
WARNING! Freebase support is currently experimental. It is very likely that the
freebase syntax within the .tag files will change in future releases of tagfs.
In order to use freebase you need to install the freebase-python bindings. They
are available via https://code.google.com/p/freebase-python/
-TODO howto query freebase
+To extend an item's taggings with freebase data you have to add a freebase query
+to the item's .tag file. Here's an example:
+
+_freebase: {'id': '/m/0clpml', 'type': '/fictional_universe/fictional_character', 'name': null, 'occupation': null}
---------------------------------------------------------------------
Further Reading
Using a file system for my bank account (Markus Pielmeier)
http://pielmeier.blogspot.com/2010/08/using-file-system-for-my-bank-account.html
---------------------------------------------------------------------
Contact
* homepage: http://wiki.github.com/marook/tagfs
* user group: http://groups.google.com/group/tagfs
* author: Markus Pielmeier <[email protected]>
diff --git a/src/modules/tagfs/freebase_support.py b/src/modules/tagfs/freebase_support.py
new file mode 100644
index 0000000..2933855
--- /dev/null
+++ b/src/modules/tagfs/freebase_support.py
@@ -0,0 +1,71 @@
+#
+# Copyright 2012 Markus Pielmeier
+#
+# This file is part of tagfs.
+#
+# tagfs is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# tagfs is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
+
+import json
+import freebase
+
+def createFreebaseAdapter(object):
+
+ import freebase
+
+ return FreebaseAdapter()
+
+class FreebaseAdapter(object):
+
+ def execute(self, query):
+ fbResult = freebase.mqlread(query.freebaseQuery)
+
+ result = {}
+
+ for key in query.selectedKeys:
+ result[key] = fbResult[key]
+
+ return result
+
+class Query(object):
+
+ def __init__(self, queryObject):
+ self.queryObject = queryObject
+
+ @property
+ def freebaseQuery(self):
+ q = {}
+
+ for key, value in self.queryObject.iteritems():
+ if(value is None):
+ q[key] = []
+ else:
+ q[key] = value
+
+ return q
+
+ @property
+ def queryString(self):
+ # TODO this func is only used in tests => remove
+ return json.dumps(self.freebaseQuery, separators = (',', ':'))
+
+ @property
+ def selectedKeys(self):
+ for key, value in self.queryObject.iteritems():
+ if(value is None):
+ yield key
+
+class QueryParser(object):
+
+ def parse(self, queryString):
+ return Query(json.loads(queryString))
diff --git a/src/modules/tagfs/item_access.py b/src/modules/tagfs/item_access.py
index 569e591..1abddb6 100644
--- a/src/modules/tagfs/item_access.py
+++ b/src/modules/tagfs/item_access.py
@@ -1,297 +1,313 @@
#
# Copyright 2009 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import time
import traceback
from cache import cache
-import tagfs.sysIO as sysIO
+import sysIO
class Tag(object):
def __init__(self, value, context = None):
if context == None:
self.context = None
else:
self.context = context.strip()
self.value = value.strip()
if not self.context == None and len(self.context) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
if len(self.value) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
def __hash__(self):
return (self.context, self.value).__hash__()
def __eq__(self, other):
return self.value == other.value and self.context == other.context
def __repr__(self):
return '<Tag %s: %s>' % (self.context, self.value)
def parseTagsFromFile(system, tagFileName):
"""Parses the tags from the specified file.
@return: The parsed values are returned as a set containing Tag objects.
@see: Tag
"""
tags = set()
with system.open(tagFileName, 'r') as tagFile:
for rawTag in tagFile:
rawTag = rawTag.strip()
try:
if len(rawTag) == 0:
continue
tagTuple = rawTag.split(':', 1)
if len(tagTuple) == 1:
tagContext = None
tagValue = tagTuple[0]
else:
tagContext = tagTuple[0]
tagValue = tagTuple[1]
tag = Tag(tagValue, context = tagContext)
tags.add(tag)
except:
logging.warning('Skipping tagging \'%s\' from file \'%s\' as it can\'t be parsed\n%s.' % (rawTag, tagFileName, traceback.format_exc()))
return tags
class Item(object):
- def __init__(self, name, system, itemAccess, parseTagsFromFile = parseTagsFromFile):
+ def __init__(self, name, system, itemAccess, freebaseQueryParser, freebaseAdapter, parseTagsFromFile = parseTagsFromFile):
self.name = name
self.system = system
self.itemAccess = itemAccess
+ self.freebaseQueryParser = freebaseQueryParser
+ self.freebaseAdapter = freebaseAdapter
self.parseTagsFromFile = parseTagsFromFile
# TODO register at file system to receive tag file change events.
@property
@cache
def itemDirectory(self):
return os.path.join(self.itemAccess.dataDirectory, self.name)
@property
@cache
def _tagFileName(self):
"""Returns the name of the tag file for this item.
"""
itemDirectory = self.itemDirectory
return os.path.join(itemDirectory, self.itemAccess.tagFileName)
+
+ @property
+ def tagFileExists(self):
+ return self.system.pathExists(self._tagFileName)
def __parseTags(self):
tagFileName = self._tagFileName
- if not os.path.exists(tagFileName):
- return None
-
- return self.parseTagsFromFile(self.system, tagFileName)
+ for rawTag in self.parseTagsFromFile(self.system, tagFileName):
+ if(rawTag.context == '_freebase'):
+ query = self.freebaseQueryParser.parse(rawTag.value)
+
+ for context, values in self.freebaseAdapter.execute(query).iteritems():
+ for value in values:
+ yield Tag(value, context)
+ else:
+ yield rawTag
@property
@cache
def tagsCreationTime(self):
tagFileName = self._tagFileName
- if not os.path.exists(tagFileName):
+ if not self.tagFileExists:
return None
return os.path.getctime(self._tagFileName)
@property
@cache
def tagsModificationTime(self):
"""Returns the last time when the tags have been modified.
"""
tagFileName = self._tagFileName
- if not os.path.exists(tagFileName):
+ if not self.tagFileExists:
return None
return os.path.getmtime(tagFileName)
@property
@cache
def tags(self):
"""Returns the tags as a list for this item.
"""
- return self.__parseTags()
+ if not self.tagFileExists:
+ return None
+
+ return list(self.__parseTags())
@property
def values(self):
for t in self.tags:
yield t.value
def getTagsByContext(self, context):
for t in self.tags:
if context != t.context:
continue
yield t
def isTaggedWithContextValue(self, context, value):
for t in self.getTagsByContext(context):
if value == t.value:
return True
return False
def isTaggedWithContext(self, context):
# TODO don't create whole list... just check wheather list is empty
return (len([c for c in self.getTagsByContext(context)]) > 0)
def isTaggedWithValue(self, value):
for v in self.values:
if value == v:
return True
return False
@property
@cache
def tagged(self):
return os.path.exists(self._tagFileName)
def __repr__(self):
return '<Item %s>' % self.name
class ItemAccess(object):
"""This is the access point to the Items.
"""
- def __init__(self, system, dataDirectory, tagFileName):
+ def __init__(self, system, dataDirectory, tagFileName, freebaseQueryParser, freebaseAdapter):
self.system = system
self.dataDirectory = dataDirectory
self.tagFileName = tagFileName
+ self.freebaseQueryParser = freebaseQueryParser
+ self.freebaseAdapter = freebaseAdapter
self.__items = None
self.__tags = None
self.__taggedItems = None
self.__untaggedItems = None
self.parseTime = 0
def __parseItems(self):
items = {}
logging.debug('Start parsing items from dir: %s', self.dataDirectory)
for itemName in os.listdir(self.dataDirectory):
if itemName == '.tagfs':
# skip directory with configuration
continue
try:
- item = Item(itemName, self.system, self)
+ item = Item(itemName, self.system, self, self.freebaseQueryParser, self.freebaseAdapter)
items[itemName] = item
except IOError, (error, strerror):
logging.error('Can \'t read tags for item %s: %s',
itemName,
strerror)
logging.debug('Found %s items', len(items))
self.parseTime = time.time()
return items
@property
@cache
def items(self):
return self.__parseItems()
@property
@cache
def tags(self):
tags = set()
for item in self.items.itervalues():
if not item.tagged:
continue
tags = tags | item.tags
return tags
@property
@cache
def taggedItems(self):
return set([item for item in self.items.itervalues() if item.tagged])
@property
@cache
def untaggedItems(self):
return set([item for item in self.items.itervalues() if not item.tagged])
def getItemDirectory(self, item):
return os.path.join(self.dataDirectory, item)
def contextTags(self, context):
contextTags = set()
for tag in self.tags:
if tag.context == context:
contextTags.add(tag)
return contextTags
@property
@cache
def contexts(self):
contexts = set()
for tag in self.tags:
if tag.context == None:
continue
contexts.add(tag.context)
return contexts
@property
@cache
def values(self):
values = set()
for tag in self.tags:
values.add(tag.value)
return values
def __str__(self):
return '[' + ', '.join([field + ': ' + str(self.__dict__[field]) for field in ['dataDirectory', 'tagFileName']]) + ']'
diff --git a/src/modules/tagfs/main.py b/src/modules/tagfs/main.py
index c73ba6f..ef162db 100644
--- a/src/modules/tagfs/main.py
+++ b/src/modules/tagfs/main.py
@@ -1,182 +1,183 @@
#!/usr/bin/env python
#
# Copyright 2009, 2010 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
#
# = tag fs =
# == glossary ==
# * item: An item is a directory in the item container directory. Items can be
# tagged using a tag file.
# * tag: A tag is a text string which can be assigned to an item. Tags can
# consist of any character except newlines.
import os
import stat
import errno
import exceptions
import time
import functools
import logging
import fuse
if not hasattr(fuse, '__version__'):
raise RuntimeError, \
"your fuse-py doesn't know of fuse.__version__, probably it's too old."
fuse.fuse_python_api = (0, 2)
from view import View
from cache import cache
from item_access import ItemAccess
from config import parseConfig
from log import logException
-import tagfs.sysIO as sysIO
+import sysIO
+import freebase_support
class TagFS(fuse.Fuse):
def __init__(self, initwd, *args, **kw):
fuse.Fuse.__init__(self, *args, **kw)
self._initwd = initwd
self._itemsRoot = None
# TODO change command line arguments structure
# goal: tagfs <items dir> <mount dir>
self.parser.add_option('-i',
'--items-dir',
dest = 'itemsDir',
help = 'items directory',
metavar = 'dir')
self.parser.add_option('-t',
'--tag-file',
dest = 'tagFileName',
help = 'tag file name',
metavar = 'file',
default = None)
self.parser.add_option('--value-filter',
action = 'store_true',
dest = 'enableValueFilters',
help = 'Displays value filter directories on toplevel instead of only context entries',
default = None)
self.parser.add_option('--root-items',
action = 'store_true',
dest = 'enableRootItemLinks',
help = 'Display item links in tagfs root directory.',
default = None)
def getItemAccess(self):
# Maybe we should move the parser run from main here.
# Or we should at least check if it was run once...
opts, args = self.cmdline
# Maybe we should add expand user? Maybe even vars???
assert opts.itemsDir != None and opts.itemsDir != ''
itemsRoot = os.path.normpath(
os.path.join(self._initwd, opts.itemsDir))
# TODO rel https://github.com/marook/tagfs/issues#issue/2
# Ensure that mount-point and items dir are disjoined.
# Something along
# assert not os.path.normpath(itemsDir).startswith(itemsRoot)
# try/except here?
try:
- return ItemAccess(sysIO.createSystem(), itemsRoot, self.config.tagFileName)
+ return ItemAccess(sysIO.createSystem(), itemsRoot, self.config.tagFileName, freebase_support.QueryParser(), freebase_support.FreebaseAdapter())
except OSError, e:
logging.error("Can't create item access from items directory %s. Reason: %s",
itemsRoot, str(e.strerror))
raise
@property
@cache
def config(self):
opts, args = self.cmdline
c = parseConfig(os.path.normpath(os.path.join(self._initwd, opts.itemsDir)))
if opts.tagFileName:
c.tagFileName = opts.tagFileName
if opts.enableValueFilters:
c.enableValueFilters = opts.enableValueFilters
if opts.enableRootItemLinks:
c.enableRootItemLinks = opts.enableRootItemLinks
logging.debug('Using configuration %s' % c)
return c
@property
@cache
def view(self):
itemAccess = self.getItemAccess()
return View(itemAccess, self.config)
@logException
def getattr(self, path):
return self.view.getattr(path)
@logException
def readdir(self, path, offset):
return self.view.readdir(path, offset)
@logException
def readlink(self, path):
return self.view.readlink(path)
@logException
def open(self, path, flags):
return self.view.open(path, flags)
@logException
def read(self, path, size, offset):
return self.view.read(path, size, offset)
@logException
def write(self, path, data, pos):
return self.view.write(path, data, pos)
@logException
def symlink(self, path, linkPath):
return self.view.symlink(path, linkPath)
def main():
fs = TagFS(os.getcwd(),
version = "%prog " + fuse.__version__,
dash_s_do = 'setsingle')
fs.parse(errex = 1)
opts, args = fs.cmdline
if opts.itemsDir == None:
fs.parser.print_help()
# items dir should probably be an arg, not an option.
print "Error: Missing items directory option."
# Quickfix rel https://github.com/marook/tagfs/issues/#issue/3
# FIXME: since we run main via sys.exit(main()), this should
# probably be handled via some return code.
import sys
sys.exit()
return fs.main()
if __name__ == '__main__':
import sys
sys.exit(main())
diff --git a/src/modules/tagfs/sysIO.py b/src/modules/tagfs/sysIO.py
index 71a37cb..0e856d2 100644
--- a/src/modules/tagfs/sysIO.py
+++ b/src/modules/tagfs/sysIO.py
@@ -1,29 +1,32 @@
#
# Copyright 2012 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
+import os.path
+
def createSystem():
- return System(open = open)
+ return System(open = open, pathExists = os.path.exists)
class System(object):
'''Abstraction layer for system access.
This class can be used to mock system access in tests.
'''
- def __init__(self, open = None):
+ def __init__(self, open = None, pathExists = None):
self.open = open
+ self.pathExists = pathExists
diff --git a/src/test/tagfs_test_small/systemMocks.py b/src/test/tagfs_test_small/systemMocks.py
index a6c1058..416f250 100644
--- a/src/test/tagfs_test_small/systemMocks.py
+++ b/src/test/tagfs_test_small/systemMocks.py
@@ -1,40 +1,43 @@
#
# Copyright 2012 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
class ReadLineFileMock(object):
def __init__(self, lines):
self.lines = lines
def __enter__(self, *args, **kwargs):
return self.lines
def __exit__(self, *args, **kwargs):
pass
class SystemMock(object):
def __init__(self, test, readFiles = {}):
self.test = test
self.readFiles = readFiles
def open(self, fileName, mode):
if(mode == 'r'):
return self.readFiles[fileName]
self.test.fail('Unknown file mode %s' % mode)
+
+ def pathExists(self, path):
+ return path in self.readFiles
diff --git a/src/test/tagfs_test_small/test_freebase_support_query.py b/src/test/tagfs_test_small/test_freebase_support_query.py
new file mode 100644
index 0000000..b940403
--- /dev/null
+++ b/src/test/tagfs_test_small/test_freebase_support_query.py
@@ -0,0 +1,33 @@
+#
+# Copyright 2012 Markus Pielmeier
+#
+# This file is part of tagfs.
+#
+# tagfs is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# tagfs is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
+
+import unittest
+import tagfs.freebase_support as freebase_support
+
+class WhenQueryWithOneFilerAndOneSelector(unittest.TestCase):
+
+ def setUp(self):
+ super(WhenQueryWithOneFilerAndOneSelector, self).setUp()
+
+ self.query = freebase_support.Query({'filter': 'filterValue', 'selector': None, })
+
+ def testThenSelectedKeysIsSelector(self):
+ self.assertEqual(list(self.query.selectedKeys), ['selector',])
+
+ def testThenQueryStringIs(self):
+ self.assertEqual(self.query.queryString, '{"filter":"filterValue","selector":[]}')
diff --git a/src/test/tagfs_test_small/test_item_access_item.py b/src/test/tagfs_test_small/test_item_access_item.py
new file mode 100644
index 0000000..d611a19
--- /dev/null
+++ b/src/test/tagfs_test_small/test_item_access_item.py
@@ -0,0 +1,63 @@
+#
+# Copyright 2012 Markus Pielmeier
+#
+# This file is part of tagfs.
+#
+# tagfs is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# tagfs is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
+
+import unittest
+
+import tagfs.item_access as item_access
+import systemMocks
+
+class ItemAccessMock(object):
+
+ def __init__(self, dataDirectory, tagFileName):
+ self.dataDirectory = dataDirectory
+ self.tagFileName = tagFileName
+
+class FreebaseQueryParserMock(object):
+
+ def __init__(self, test):
+ self.test = test
+
+ def parse(self, queryString):
+ return queryString
+
+class FreebaseAdapterMock(object):
+
+ def __init__(self, test):
+ self.test = test
+
+ def execute(self, query):
+ return {
+ 'freebaseContext': ['freebaseValue'],
+ }
+
+class WhenItemHasFreebaseQueryTag(unittest.TestCase):
+
+ def setUp(self):
+ super(WhenItemHasFreebaseQueryTag, self).setUp()
+
+ self.system = systemMocks.SystemMock(self)
+ self.system.readFiles['/path/to/my/data/directory/myItem/.tag'] = systemMocks.ReadLineFileMock(['_freebase: myFreebaseQuery',])
+
+ self.itemAccess = ItemAccessMock('/path/to/my/data/directory', '.tag')
+ self.freebaseQueryParser = FreebaseQueryParserMock(self)
+ self.freebaseAdapter = FreebaseAdapterMock(self)
+
+ self.item = item_access.Item('myItem', self.system, self.itemAccess, self.freebaseQueryParser, self.freebaseAdapter)
+
+ def testThenItemHasFreebaseTaggingsFromItemAccess(self):
+ self.assertEqual(list(self.item.getTagsByContext('freebaseContext')), [item_access.Tag('freebaseValue', 'freebaseContext'),])
diff --git a/src/test/tagfs_test_small/test_item_access_tag.py b/src/test/tagfs_test_small/test_item_access_tag.py
index 6c82215..dc6e6c6 100644
--- a/src/test/tagfs_test_small/test_item_access_tag.py
+++ b/src/test/tagfs_test_small/test_item_access_tag.py
@@ -1,36 +1,36 @@
#
# Copyright 2012 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from unittest import TestCase
-from tagfs import item_access
+import tagfs.item_access as item_access
class TagTest(TestCase):
def testTagValueInfluencesHash(self):
self.assertTrue(item_access.Tag('a', None).__hash__() != item_access.Tag('b', None).__hash__())
def testTagContextInfluencesHash(self):
self.assertTrue(item_access.Tag('v', None).__hash__() != item_access.Tag('v', 'c').__hash__())
def testEqualTagsEqWhenContextNone(self):
self.assertTrue(item_access.Tag('t', None).__eq__(item_access.Tag('t', None)))
def testEqualTagsEqWhenContextStr(self):
self.assertTrue(item_access.Tag('t', 'c').__eq__(item_access.Tag('t', 'c')))
|
marook/tagfs
|
d4e1b132e081a4c67e046113a23a7e9d0014ef4d
|
fixed my email address :)
|
diff --git a/AUTHORS b/AUTHORS
index 1a2c117..d2e400d 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,2 +1,2 @@
-Markus Pielmeier <[email protected]>
+Markus Pielmeier <[email protected]>
Peter Prohaska <[email protected]>
diff --git a/README b/README
index 024f7c8..6cfc235 100644
--- a/README
+++ b/README
@@ -1,164 +1,164 @@
tagfs - tag file system
1) Introduction
2) Requirements
3) Installation
4) Tagging Files
5) Usage
6) Configuration
6.1) Options
6.1.1) tagFileName
6.1.2) enableValueFilters
6.1.3) enableRootItemLinks
7) Further Reading
8) Contact
---------------------------------------------------------------------
Introduction
tagfs is used to organize your files using tags.
This document contains basic usage instructions for users. To develop or debug
tagfs see the README.dev file.
---------------------------------------------------------------------
Requirements
* python 2.5, 2.6, 2.7
* Linux kernel with fuse enabled
* python-fuse installed
---------------------------------------------------------------------
Installation
To install tagfs into your home directory type the following:
$ python setup.py test e2e_test install --home ~/.local
If you haven't already extended your local python path then add the following
to your environment configuration script. For example to your ~/.bashrc:
$ export PYTHONPATH=~/.local/lib/python:$PYTHONPATH
You may also need to add ~/.local/bin to your PATH environment variable:
$ export PATH=~/.local/bin:$PATH
---------------------------------------------------------------------
Tagging Files
Before you can filter anything using tagfs you need to tag your items. An item
is a directory which contains a file called .tag. All items must be below one
directory.
Let's create a simple item structure.
First we create the root directory for all items:
$ mkdir items
Then we create our first item:
$ mkdir items/Ted
We tag the 'Ted' item as movie:
$ echo movie >> items/Ted/.tag
We also tag 'Ted' as genre comedy:
$ echo 'genre: comedy' >> items/Ted/.tag
Then we add a second item:
$ mkdir items/banana
$ echo fruit >> items/banana/.tag
$ echo 'genre: delicious' >> items/banana/.tag
Modifying .tag files using echo, grep, sed may be a little hard sometimes.
There are some convenience scripts available through the tagfs-utils project.
See https://github.com/marook/tagfs-utils for details.
---------------------------------------------------------------------
Usage
After installation tagfs can be started the following way.
Mount a tagged directory:
$ tagfs -i /path/to/my/items/directory /path/to/my/mount/point
Unmount a tagged directory:
$ fusermount -u /path/to/my/mount/point
Right now tagfs reads the taggings only when it's getting mounted. So if you
modify the tags after mounting you will not see any changes in the tagfs file
system.
In general tagfs will try to reduce the number of filter directories below the
virtual file system. That's why you may not see some filters which would not
reduce the number of selected items.
---------------------------------------------------------------------
Configuration
tagfs can be configured through configuration files. Configuration files are
searched in different locations by tagfs. The following locations are used.
Locations with higher priority come first:
- <items directory>/.tagfs/tagfs.conf
- ~/.tagfs/tagfs.conf
- /etc/tagfs/tagfs.conf
Right now the following configuration options are supported.
---------------------------------------------------------------------
Configuration - Options - tagFileName
Through this option the name of the parsed tag files can be specified. The
default value is '.tag'.
Example:
[global]
tagFileName = ABOUT
---------------------------------------------------------------------
Configuration - Options - enableValueFilters
You can enable or disable value filters. If you enable value filters you will
see filter directories for each tag value. For value filters the tag's
context can be anyone. The default value is 'false'.
Example:
[global]
enableValueFilters = true
---------------------------------------------------------------------
Configuration - Options - enableRootItemLinks
To show links to all items in the tagfs '/' directory enable this option. The
default value is 'false'.
Example:
[global]
enableRootItemLinks = true
---------------------------------------------------------------------
Further Reading
Using a file system for my bank account (Markus Pielmeier)
http://pielmeier.blogspot.com/2010/08/using-file-system-for-my-bank-account.html
---------------------------------------------------------------------
Contact
* homepage: http://wiki.github.com/marook/tagfs
* user group: http://groups.google.com/group/tagfs
-* author: Markus Pielmeier <[email protected]>
+* author: Markus Pielmeier <[email protected]>
|
marook/tagfs
|
022c0ff1d4a57f1cacbf40e452d2e70263dd2200
|
added warning about freebase syntax
|
diff --git a/README b/README
index d8e7baf..986ddd6 100644
--- a/README
+++ b/README
@@ -1,177 +1,180 @@
tagfs - tag file system
1) Introduction
2) Requirements
3) Installation
4) Tagging Files
5) Usage
6) Configuration
6.1) Options
6.1.1) tagFileName
6.1.2) enableValueFilters
6.1.3) enableRootItemLinks
7) Freebase Integration
8) Further Reading
9) Contact
---------------------------------------------------------------------
Introduction
tagfs is used to organize your files using tags.
This document contains basic usage instructions for users. To develop or debug
tagfs see the README.dev file.
---------------------------------------------------------------------
Requirements
* python 2.5, 2.6, 2.7
* Linux kernel with fuse enabled
* python-fuse installed
---------------------------------------------------------------------
Installation
To install tagfs into your home directory type the following:
$ python setup.py test e2e_test install --home ~/.local
If you haven't already extended your local python path then add the following
to your environment configuration script. For example to your ~/.bashrc:
$ export PYTHONPATH=~/.local/lib/python:$PYTHONPATH
You may also need to add ~/.local/bin to your PATH environment variable:
$ export PATH=~/.local/bin:$PATH
---------------------------------------------------------------------
Tagging Files
Before you can filter anything using tagfs you need to tag your items. An item
is a directory which contains a file called .tag. All items must be below one
directory.
Let's create a simple item structure.
First we create the root directory for all items:
$ mkdir items
Then we create our first item:
$ mkdir items/Ted
We tag the 'Ted' item as movie:
$ echo movie >> items/Ted/.tag
We also tag 'Ted' as genre comedy:
$ echo 'genre: comedy' >> items/Ted/.tag
Then we add a second item:
$ mkdir items/banana
$ echo fruit >> items/banana/.tag
$ echo 'genre: delicious' >> items/banana/.tag
Modifying .tag files using echo, grep, sed may be a little hard sometimes.
There are some convenience scripts available through the tagfs-utils project.
See https://github.com/marook/tagfs-utils for details.
---------------------------------------------------------------------
Usage
After installation tagfs can be started the following way.
Mount a tagged directory:
$ tagfs -i /path/to/my/items/directory /path/to/my/mount/point
Unmount a tagged directory:
$ fusermount -u /path/to/my/mount/point
Right now tagfs reads the taggings only when it's getting mounted. So if you
modify the tags after mounting you will not see any changes in the tagfs file
system.
In general tagfs will try to reduce the number of filter directories below the
virtual file system. That's why you may not see some filters which would not
reduce the number of selected items.
---------------------------------------------------------------------
Configuration
tagfs can be configured through configuration files. Configuration files are
searched in different locations by tagfs. The following locations are used.
Locations with higher priority come first:
- <items directory>/.tagfs/tagfs.conf
- ~/.tagfs/tagfs.conf
- /etc/tagfs/tagfs.conf
Right now the following configuration options are supported.
---------------------------------------------------------------------
Configuration - Options - tagFileName
Through this option the name of the parsed tag files can be specified. The
default value is '.tag'.
Example:
[global]
tagFileName = ABOUT
---------------------------------------------------------------------
Configuration - Options - enableValueFilters
You can enable or disable value filters. If you enable value filters you will
see filter directories for each tag value. For value filters the tag's
context can be anyone. The default value is 'false'.
Example:
[global]
enableValueFilters = true
---------------------------------------------------------------------
Configuration - Options - enableRootItemLinks
To show links to all items in the tagfs '/' directory enable this option. The
default value is 'false'.
Example:
[global]
enableRootItemLinks = true
---------------------------------------------------------------------
Freebase Integration
Freebase is an open graph of people, places and things. tagfs allows you to
extend your own taggings with data directly from the freebase graph.
+WARNING! Freebase support is currently experimental. It is very likely that the
+freebase syntax within the .tag files will change in future releases of tagfs.
+
In order to use freebase you need to install the freebase-python bindings. They
are available via https://code.google.com/p/freebase-python/
TODO howto query freebase
---------------------------------------------------------------------
Further Reading
Using a file system for my bank account (Markus Pielmeier)
http://pielmeier.blogspot.com/2010/08/using-file-system-for-my-bank-account.html
---------------------------------------------------------------------
Contact
* homepage: http://wiki.github.com/marook/tagfs
* user group: http://groups.google.com/group/tagfs
* author: Markus Pielmeier <[email protected]>
|
marook/tagfs
|
c6e91b52784c5ea49f1c61c1919faaaf20e07fe6
|
prepared parseTagsFromFile in Items to be mocked
|
diff --git a/src/modules/tagfs/item_access.py b/src/modules/tagfs/item_access.py
index 2f400e8..569e591 100644
--- a/src/modules/tagfs/item_access.py
+++ b/src/modules/tagfs/item_access.py
@@ -1,296 +1,297 @@
#
# Copyright 2009 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import time
import traceback
from cache import cache
import tagfs.sysIO as sysIO
class Tag(object):
def __init__(self, value, context = None):
if context == None:
self.context = None
else:
self.context = context.strip()
self.value = value.strip()
if not self.context == None and len(self.context) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
if len(self.value) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
def __hash__(self):
return (self.context, self.value).__hash__()
def __eq__(self, other):
return self.value == other.value and self.context == other.context
def __repr__(self):
return '<Tag %s: %s>' % (self.context, self.value)
def parseTagsFromFile(system, tagFileName):
"""Parses the tags from the specified file.
@return: The parsed values are returned as a set containing Tag objects.
@see: Tag
"""
tags = set()
with system.open(tagFileName, 'r') as tagFile:
for rawTag in tagFile:
rawTag = rawTag.strip()
try:
if len(rawTag) == 0:
continue
tagTuple = rawTag.split(':', 1)
if len(tagTuple) == 1:
tagContext = None
tagValue = tagTuple[0]
else:
tagContext = tagTuple[0]
tagValue = tagTuple[1]
tag = Tag(tagValue, context = tagContext)
tags.add(tag)
except:
logging.warning('Skipping tagging \'%s\' from file \'%s\' as it can\'t be parsed\n%s.' % (rawTag, tagFileName, traceback.format_exc()))
return tags
class Item(object):
- def __init__(self, name, system, itemAccess):
+ def __init__(self, name, system, itemAccess, parseTagsFromFile = parseTagsFromFile):
self.name = name
self.system = system
self.itemAccess = itemAccess
+ self.parseTagsFromFile = parseTagsFromFile
# TODO register at file system to receive tag file change events.
@property
@cache
def itemDirectory(self):
return os.path.join(self.itemAccess.dataDirectory, self.name)
@property
@cache
def _tagFileName(self):
"""Returns the name of the tag file for this item.
"""
itemDirectory = self.itemDirectory
return os.path.join(itemDirectory, self.itemAccess.tagFileName)
def __parseTags(self):
tagFileName = self._tagFileName
if not os.path.exists(tagFileName):
return None
- return parseTagsFromFile(self.system, tagFileName)
+ return self.parseTagsFromFile(self.system, tagFileName)
@property
@cache
def tagsCreationTime(self):
tagFileName = self._tagFileName
if not os.path.exists(tagFileName):
return None
return os.path.getctime(self._tagFileName)
@property
@cache
def tagsModificationTime(self):
"""Returns the last time when the tags have been modified.
"""
tagFileName = self._tagFileName
if not os.path.exists(tagFileName):
return None
return os.path.getmtime(tagFileName)
@property
@cache
def tags(self):
"""Returns the tags as a list for this item.
"""
return self.__parseTags()
@property
def values(self):
for t in self.tags:
yield t.value
def getTagsByContext(self, context):
for t in self.tags:
if context != t.context:
continue
yield t
def isTaggedWithContextValue(self, context, value):
for t in self.getTagsByContext(context):
if value == t.value:
return True
return False
def isTaggedWithContext(self, context):
        # TODO don't create whole list... just check whether list is empty
return (len([c for c in self.getTagsByContext(context)]) > 0)
def isTaggedWithValue(self, value):
for v in self.values:
if value == v:
return True
return False
@property
@cache
def tagged(self):
return os.path.exists(self._tagFileName)
def __repr__(self):
return '<Item %s>' % self.name
class ItemAccess(object):
"""This is the access point to the Items.
"""
def __init__(self, system, dataDirectory, tagFileName):
self.system = system
self.dataDirectory = dataDirectory
self.tagFileName = tagFileName
self.__items = None
self.__tags = None
self.__taggedItems = None
self.__untaggedItems = None
self.parseTime = 0
def __parseItems(self):
items = {}
logging.debug('Start parsing items from dir: %s', self.dataDirectory)
for itemName in os.listdir(self.dataDirectory):
if itemName == '.tagfs':
# skip directory with configuration
continue
try:
item = Item(itemName, self.system, self)
items[itemName] = item
except IOError, (error, strerror):
logging.error('Can \'t read tags for item %s: %s',
itemName,
strerror)
logging.debug('Found %s items', len(items))
self.parseTime = time.time()
return items
@property
@cache
def items(self):
return self.__parseItems()
@property
@cache
def tags(self):
tags = set()
for item in self.items.itervalues():
if not item.tagged:
continue
tags = tags | item.tags
return tags
@property
@cache
def taggedItems(self):
return set([item for item in self.items.itervalues() if item.tagged])
@property
@cache
def untaggedItems(self):
return set([item for item in self.items.itervalues() if not item.tagged])
def getItemDirectory(self, item):
return os.path.join(self.dataDirectory, item)
def contextTags(self, context):
contextTags = set()
for tag in self.tags:
if tag.context == context:
contextTags.add(tag)
return contextTags
@property
@cache
def contexts(self):
contexts = set()
for tag in self.tags:
if tag.context == None:
continue
contexts.add(tag.context)
return contexts
@property
@cache
def values(self):
values = set()
for tag in self.tags:
values.add(tag.value)
return values
def __str__(self):
return '[' + ', '.join([field + ': ' + str(self.__dict__[field]) for field in ['dataDirectory', 'tagFileName']]) + ']'
|
marook/tagfs
|
cd6ca9b04c7294d8b9eb3cd8f47cbb9e1370d5ee
|
added unit tests for whitespace tagging parsing
|
diff --git a/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py b/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py
index ee398d5..ab29a4d 100644
--- a/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py
+++ b/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py
@@ -1,44 +1,54 @@
#
# Copyright 2012 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import unittest
import tagfs.item_access as item_access
import systemMocks
class ParseTagsFromFileTest(unittest.TestCase):
def setUp(self):
super(ParseTagsFromFileTest, self).setUp()
self.system = systemMocks.SystemMock(self)
def setTagFileContent(self, lines):
self.system.readFiles['.tag'] = systemMocks.ReadLineFileMock(lines)
def assertParseTags(self, expectedTags):
self.assertEqual(list(item_access.parseTagsFromFile(self.system, '.tag')), expectedTags)
def testParseTagWithoutContext(self):
self.setTagFileContent(['value',])
self.assertParseTags([item_access.Tag('value'),])
def testParseTagWithContext(self):
self.setTagFileContent(['context: value',])
self.assertParseTags([item_access.Tag('value', 'context'),])
+
+ def testIgnoreEmptyLines(self):
+ self.setTagFileContent(['',])
+
+ self.assertParseTags([])
+
+ def testIgnoreLinesWithJustSpaces(self):
+ self.setTagFileContent(['\t ',])
+
+ self.assertParseTags([])
|
marook/tagfs
|
a992946c24b2f264941e85fd1853a29c867a9251
|
added unittest for parsing tagging with context
|
diff --git a/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py b/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py
index 6425e29..ee398d5 100644
--- a/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py
+++ b/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py
@@ -1,39 +1,44 @@
#
# Copyright 2012 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import unittest
import tagfs.item_access as item_access
import systemMocks
class ParseTagsFromFileTest(unittest.TestCase):
def setUp(self):
super(ParseTagsFromFileTest, self).setUp()
self.system = systemMocks.SystemMock(self)
def setTagFileContent(self, lines):
self.system.readFiles['.tag'] = systemMocks.ReadLineFileMock(lines)
def assertParseTags(self, expectedTags):
self.assertEqual(list(item_access.parseTagsFromFile(self.system, '.tag')), expectedTags)
def testParseTagWithoutContext(self):
self.setTagFileContent(['value',])
self.assertParseTags([item_access.Tag('value'),])
+
+ def testParseTagWithContext(self):
+ self.setTagFileContent(['context: value',])
+
+ self.assertParseTags([item_access.Tag('value', 'context'),])
|
marook/tagfs
|
9418c357fc3abdf32eebdaa2cc94438773e9c036
|
added GPL header to test case
|
diff --git a/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py b/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py
index 2c1446e..6425e29 100644
--- a/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py
+++ b/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py
@@ -1,21 +1,39 @@
+#
+# Copyright 2012 Markus Pielmeier
+#
+# This file is part of tagfs.
+#
+# tagfs is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# tagfs is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
+
import unittest
import tagfs.item_access as item_access
import systemMocks
class ParseTagsFromFileTest(unittest.TestCase):
def setUp(self):
super(ParseTagsFromFileTest, self).setUp()
self.system = systemMocks.SystemMock(self)
def setTagFileContent(self, lines):
self.system.readFiles['.tag'] = systemMocks.ReadLineFileMock(lines)
def assertParseTags(self, expectedTags):
self.assertEqual(list(item_access.parseTagsFromFile(self.system, '.tag')), expectedTags)
def testParseTagWithoutContext(self):
self.setTagFileContent(['value',])
self.assertParseTags([item_access.Tag('value'),])
|
marook/tagfs
|
8968febb19d5784c468fe01ae0cd587ceed4112b
|
implemented unit test for parseTagsFromFile
|
diff --git a/src/modules/tagfs/item_access.py b/src/modules/tagfs/item_access.py
index c9e3d2e..2f400e8 100644
--- a/src/modules/tagfs/item_access.py
+++ b/src/modules/tagfs/item_access.py
@@ -1,300 +1,296 @@
#
# Copyright 2009 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import time
import traceback
from cache import cache
import tagfs.sysIO as sysIO
class Tag(object):
def __init__(self, value, context = None):
if context == None:
self.context = None
else:
self.context = context.strip()
self.value = value.strip()
if not self.context == None and len(self.context) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
if len(self.value) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
def __hash__(self):
return (self.context, self.value).__hash__()
def __eq__(self, other):
return self.value == other.value and self.context == other.context
def __repr__(self):
return '<Tag %s: %s>' % (self.context, self.value)
def parseTagsFromFile(system, tagFileName):
"""Parses the tags from the specified file.
@return: The parsed values are returned as a set containing Tag objects.
@see: Tag
"""
tags = set()
- tagFile = system.open(tagFileName, 'r')
- try:
- for rawTag in tagFile.readlines():
+ with system.open(tagFileName, 'r') as tagFile:
+ for rawTag in tagFile:
rawTag = rawTag.strip()
try:
if len(rawTag) == 0:
continue
tagTuple = rawTag.split(':', 1)
if len(tagTuple) == 1:
tagContext = None
tagValue = tagTuple[0]
else:
tagContext = tagTuple[0]
tagValue = tagTuple[1]
tag = Tag(tagValue, context = tagContext)
tags.add(tag)
except:
logging.warning('Skipping tagging \'%s\' from file \'%s\' as it can\'t be parsed\n%s.' % (rawTag, tagFileName, traceback.format_exc()))
-
- finally:
- tagFile.close()
return tags
class Item(object):
def __init__(self, name, system, itemAccess):
self.name = name
self.system = system
self.itemAccess = itemAccess
# TODO register at file system to receive tag file change events.
@property
@cache
def itemDirectory(self):
return os.path.join(self.itemAccess.dataDirectory, self.name)
@property
@cache
def _tagFileName(self):
"""Returns the name of the tag file for this item.
"""
itemDirectory = self.itemDirectory
return os.path.join(itemDirectory, self.itemAccess.tagFileName)
def __parseTags(self):
tagFileName = self._tagFileName
if not os.path.exists(tagFileName):
return None
return parseTagsFromFile(self.system, tagFileName)
@property
@cache
def tagsCreationTime(self):
tagFileName = self._tagFileName
if not os.path.exists(tagFileName):
return None
return os.path.getctime(self._tagFileName)
@property
@cache
def tagsModificationTime(self):
"""Returns the last time when the tags have been modified.
"""
tagFileName = self._tagFileName
if not os.path.exists(tagFileName):
return None
return os.path.getmtime(tagFileName)
@property
@cache
def tags(self):
"""Returns the tags as a list for this item.
"""
return self.__parseTags()
@property
def values(self):
for t in self.tags:
yield t.value
def getTagsByContext(self, context):
for t in self.tags:
if context != t.context:
continue
yield t
def isTaggedWithContextValue(self, context, value):
for t in self.getTagsByContext(context):
if value == t.value:
return True
return False
def isTaggedWithContext(self, context):
        # TODO don't create whole list... just check whether list is empty
return (len([c for c in self.getTagsByContext(context)]) > 0)
def isTaggedWithValue(self, value):
for v in self.values:
if value == v:
return True
return False
@property
@cache
def tagged(self):
return os.path.exists(self._tagFileName)
def __repr__(self):
return '<Item %s>' % self.name
class ItemAccess(object):
"""This is the access point to the Items.
"""
def __init__(self, system, dataDirectory, tagFileName):
self.system = system
self.dataDirectory = dataDirectory
self.tagFileName = tagFileName
self.__items = None
self.__tags = None
self.__taggedItems = None
self.__untaggedItems = None
self.parseTime = 0
def __parseItems(self):
items = {}
logging.debug('Start parsing items from dir: %s', self.dataDirectory)
for itemName in os.listdir(self.dataDirectory):
if itemName == '.tagfs':
# skip directory with configuration
continue
try:
item = Item(itemName, self.system, self)
items[itemName] = item
except IOError, (error, strerror):
logging.error('Can \'t read tags for item %s: %s',
itemName,
strerror)
logging.debug('Found %s items', len(items))
self.parseTime = time.time()
return items
@property
@cache
def items(self):
return self.__parseItems()
@property
@cache
def tags(self):
tags = set()
for item in self.items.itervalues():
if not item.tagged:
continue
tags = tags | item.tags
return tags
@property
@cache
def taggedItems(self):
return set([item for item in self.items.itervalues() if item.tagged])
@property
@cache
def untaggedItems(self):
return set([item for item in self.items.itervalues() if not item.tagged])
def getItemDirectory(self, item):
return os.path.join(self.dataDirectory, item)
def contextTags(self, context):
contextTags = set()
for tag in self.tags:
if tag.context == context:
contextTags.add(tag)
return contextTags
@property
@cache
def contexts(self):
contexts = set()
for tag in self.tags:
if tag.context == None:
continue
contexts.add(tag.context)
return contexts
@property
@cache
def values(self):
values = set()
for tag in self.tags:
values.add(tag.value)
return values
def __str__(self):
return '[' + ', '.join([field + ': ' + str(self.__dict__[field]) for field in ['dataDirectory', 'tagFileName']]) + ']'
diff --git a/src/test/tagfs_test_small/systemMocks.py b/src/test/tagfs_test_small/systemMocks.py
new file mode 100644
index 0000000..a6c1058
--- /dev/null
+++ b/src/test/tagfs_test_small/systemMocks.py
@@ -0,0 +1,40 @@
+#
+# Copyright 2012 Markus Pielmeier
+#
+# This file is part of tagfs.
+#
+# tagfs is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# tagfs is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
+
+class ReadLineFileMock(object):
+
+ def __init__(self, lines):
+ self.lines = lines
+
+ def __enter__(self, *args, **kwargs):
+ return self.lines
+
+ def __exit__(self, *args, **kwargs):
+ pass
+
+class SystemMock(object):
+
+ def __init__(self, test, readFiles = {}):
+ self.test = test
+ self.readFiles = readFiles
+
+ def open(self, fileName, mode):
+ if(mode == 'r'):
+ return self.readFiles[fileName]
+
+ self.test.fail('Unknown file mode %s' % mode)
diff --git a/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py b/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py
new file mode 100644
index 0000000..2c1446e
--- /dev/null
+++ b/src/test/tagfs_test_small/test_item_access_parseTagsFromFile.py
@@ -0,0 +1,21 @@
+import unittest
+import tagfs.item_access as item_access
+import systemMocks
+
+class ParseTagsFromFileTest(unittest.TestCase):
+
+ def setUp(self):
+ super(ParseTagsFromFileTest, self).setUp()
+
+ self.system = systemMocks.SystemMock(self)
+
+ def setTagFileContent(self, lines):
+ self.system.readFiles['.tag'] = systemMocks.ReadLineFileMock(lines)
+
+ def assertParseTags(self, expectedTags):
+ self.assertEqual(list(item_access.parseTagsFromFile(self.system, '.tag')), expectedTags)
+
+ def testParseTagWithoutContext(self):
+ self.setTagFileContent(['value',])
+
+ self.assertParseTags([item_access.Tag('value'),])
|
marook/tagfs
|
d461424bd8f50d6943d0c91c2d1759717c859d78
|
wrapped calls to 'open' to be ready for unit test mocking
|
diff --git a/src/modules/tagfs/item_access.py b/src/modules/tagfs/item_access.py
index 7e05447..c9e3d2e 100644
--- a/src/modules/tagfs/item_access.py
+++ b/src/modules/tagfs/item_access.py
@@ -1,297 +1,300 @@
#
# Copyright 2009 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import time
import traceback
from cache import cache
+import tagfs.sysIO as sysIO
class Tag(object):
def __init__(self, value, context = None):
if context == None:
self.context = None
else:
self.context = context.strip()
self.value = value.strip()
if not self.context == None and len(self.context) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
if len(self.value) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
def __hash__(self):
return (self.context, self.value).__hash__()
def __eq__(self, other):
return self.value == other.value and self.context == other.context
def __repr__(self):
return '<Tag %s: %s>' % (self.context, self.value)
-def parseTagsFromFile(tagFileName):
+def parseTagsFromFile(system, tagFileName):
"""Parses the tags from the specified file.
@return: The parsed values are returned as a set containing Tag objects.
@see: Tag
"""
tags = set()
- tagFile = open(tagFileName, 'r')
+ tagFile = system.open(tagFileName, 'r')
try:
for rawTag in tagFile.readlines():
rawTag = rawTag.strip()
try:
if len(rawTag) == 0:
continue
tagTuple = rawTag.split(':', 1)
if len(tagTuple) == 1:
tagContext = None
tagValue = tagTuple[0]
else:
tagContext = tagTuple[0]
tagValue = tagTuple[1]
tag = Tag(tagValue, context = tagContext)
tags.add(tag)
except:
logging.warning('Skipping tagging \'%s\' from file \'%s\' as it can\'t be parsed\n%s.' % (rawTag, tagFileName, traceback.format_exc()))
finally:
tagFile.close()
return tags
class Item(object):
- def __init__(self, name, itemAccess):
+ def __init__(self, name, system, itemAccess):
self.name = name
+ self.system = system
self.itemAccess = itemAccess
# TODO register at file system to receive tag file change events.
@property
@cache
def itemDirectory(self):
return os.path.join(self.itemAccess.dataDirectory, self.name)
@property
@cache
def _tagFileName(self):
"""Returns the name of the tag file for this item.
"""
itemDirectory = self.itemDirectory
return os.path.join(itemDirectory, self.itemAccess.tagFileName)
def __parseTags(self):
tagFileName = self._tagFileName
if not os.path.exists(tagFileName):
return None
- return parseTagsFromFile(tagFileName)
+ return parseTagsFromFile(self.system, tagFileName)
@property
@cache
def tagsCreationTime(self):
tagFileName = self._tagFileName
if not os.path.exists(tagFileName):
return None
return os.path.getctime(self._tagFileName)
@property
@cache
def tagsModificationTime(self):
"""Returns the last time when the tags have been modified.
"""
tagFileName = self._tagFileName
if not os.path.exists(tagFileName):
return None
return os.path.getmtime(tagFileName)
@property
@cache
def tags(self):
"""Returns the tags as a list for this item.
"""
return self.__parseTags()
@property
def values(self):
for t in self.tags:
yield t.value
def getTagsByContext(self, context):
for t in self.tags:
if context != t.context:
continue
yield t
def isTaggedWithContextValue(self, context, value):
for t in self.getTagsByContext(context):
if value == t.value:
return True
return False
def isTaggedWithContext(self, context):
# TODO don't create whole list... just check wheather list is empty
return (len([c for c in self.getTagsByContext(context)]) > 0)
def isTaggedWithValue(self, value):
for v in self.values:
if value == v:
return True
return False
@property
@cache
def tagged(self):
return os.path.exists(self._tagFileName)
def __repr__(self):
return '<Item %s>' % self.name
class ItemAccess(object):
"""This is the access point to the Items.
"""
- def __init__(self, dataDirectory, tagFileName):
+ def __init__(self, system, dataDirectory, tagFileName):
+ self.system = system
self.dataDirectory = dataDirectory
self.tagFileName = tagFileName
self.__items = None
self.__tags = None
self.__taggedItems = None
self.__untaggedItems = None
self.parseTime = 0
def __parseItems(self):
items = {}
logging.debug('Start parsing items from dir: %s', self.dataDirectory)
for itemName in os.listdir(self.dataDirectory):
if itemName == '.tagfs':
# skip directory with configuration
continue
try:
- item = Item(itemName, self)
+ item = Item(itemName, self.system, self)
items[itemName] = item
except IOError, (error, strerror):
logging.error('Can \'t read tags for item %s: %s',
itemName,
strerror)
logging.debug('Found %s items', len(items))
self.parseTime = time.time()
return items
@property
@cache
def items(self):
return self.__parseItems()
@property
@cache
def tags(self):
tags = set()
for item in self.items.itervalues():
if not item.tagged:
continue
tags = tags | item.tags
return tags
@property
@cache
def taggedItems(self):
return set([item for item in self.items.itervalues() if item.tagged])
@property
@cache
def untaggedItems(self):
return set([item for item in self.items.itervalues() if not item.tagged])
def getItemDirectory(self, item):
return os.path.join(self.dataDirectory, item)
def contextTags(self, context):
contextTags = set()
for tag in self.tags:
if tag.context == context:
contextTags.add(tag)
return contextTags
@property
@cache
def contexts(self):
contexts = set()
for tag in self.tags:
if tag.context == None:
continue
contexts.add(tag.context)
return contexts
@property
@cache
def values(self):
values = set()
for tag in self.tags:
values.add(tag.value)
return values
def __str__(self):
return '[' + ', '.join([field + ': ' + str(self.__dict__[field]) for field in ['dataDirectory', 'tagFileName']]) + ']'
diff --git a/src/modules/tagfs/main.py b/src/modules/tagfs/main.py
index a76dead..c73ba6f 100644
--- a/src/modules/tagfs/main.py
+++ b/src/modules/tagfs/main.py
@@ -1,180 +1,182 @@
#!/usr/bin/env python
#
# Copyright 2009, 2010 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
#
# = tag fs =
# == glossary ==
# * item: An item is a directory in the item container directory. Items can be
# tagged using a tag file.
# * tag: A tag is a text string which can be assigned to an item. Tags can
# consist of any character except newlines.
import os
import stat
import errno
import exceptions
import time
import functools
import logging
import fuse
if not hasattr(fuse, '__version__'):
raise RuntimeError, \
"your fuse-py doesn't know of fuse.__version__, probably it's too old."
fuse.fuse_python_api = (0, 2)
from view import View
from cache import cache
from item_access import ItemAccess
from config import parseConfig
from log import logException
+
+import tagfs.sysIO as sysIO
class TagFS(fuse.Fuse):
def __init__(self, initwd, *args, **kw):
fuse.Fuse.__init__(self, *args, **kw)
self._initwd = initwd
self._itemsRoot = None
# TODO change command line arguments structure
# goal: tagfs <items dir> <mount dir>
self.parser.add_option('-i',
'--items-dir',
dest = 'itemsDir',
help = 'items directory',
metavar = 'dir')
self.parser.add_option('-t',
'--tag-file',
dest = 'tagFileName',
help = 'tag file name',
metavar = 'file',
default = None)
self.parser.add_option('--value-filter',
action = 'store_true',
dest = 'enableValueFilters',
help = 'Displays value filter directories on toplevel instead of only context entries',
default = None)
self.parser.add_option('--root-items',
action = 'store_true',
dest = 'enableRootItemLinks',
help = 'Display item links in tagfs root directory.',
default = None)
def getItemAccess(self):
# Maybe we should move the parser run from main here.
# Or we should at least check if it was run once...
opts, args = self.cmdline
# Maybe we should add expand user? Maybe even vars???
assert opts.itemsDir != None and opts.itemsDir != ''
itemsRoot = os.path.normpath(
os.path.join(self._initwd, opts.itemsDir))
# TODO rel https://github.com/marook/tagfs/issues#issue/2
# Ensure that mount-point and items dir are disjoined.
# Something along
# assert not os.path.normpath(itemsDir).startswith(itemsRoot)
# try/except here?
try:
- return ItemAccess(itemsRoot, self.config.tagFileName)
+ return ItemAccess(sysIO.createSystem(), itemsRoot, self.config.tagFileName)
except OSError, e:
logging.error("Can't create item access from items directory %s. Reason: %s",
itemsRoot, str(e.strerror))
raise
@property
@cache
def config(self):
opts, args = self.cmdline
c = parseConfig(os.path.normpath(os.path.join(self._initwd, opts.itemsDir)))
if opts.tagFileName:
c.tagFileName = opts.tagFileName
if opts.enableValueFilters:
c.enableValueFilters = opts.enableValueFilters
if opts.enableRootItemLinks:
c.enableRootItemLinks = opts.enableRootItemLinks
logging.debug('Using configuration %s' % c)
return c
@property
@cache
def view(self):
itemAccess = self.getItemAccess()
return View(itemAccess, self.config)
@logException
def getattr(self, path):
return self.view.getattr(path)
@logException
def readdir(self, path, offset):
return self.view.readdir(path, offset)
@logException
def readlink(self, path):
return self.view.readlink(path)
@logException
def open(self, path, flags):
return self.view.open(path, flags)
@logException
def read(self, path, size, offset):
return self.view.read(path, size, offset)
@logException
def write(self, path, data, pos):
return self.view.write(path, data, pos)
@logException
def symlink(self, path, linkPath):
return self.view.symlink(path, linkPath)
def main():
fs = TagFS(os.getcwd(),
version = "%prog " + fuse.__version__,
dash_s_do = 'setsingle')
fs.parse(errex = 1)
opts, args = fs.cmdline
if opts.itemsDir == None:
fs.parser.print_help()
# items dir should probably be an arg, not an option.
print "Error: Missing items directory option."
# Quickfix rel https://github.com/marook/tagfs/issues/#issue/3
# FIXME: since we run main via sys.exit(main()), this should
# probably be handled via some return code.
import sys
sys.exit()
return fs.main()
if __name__ == '__main__':
import sys
sys.exit(main())
diff --git a/src/modules/tagfs/sysIO.py b/src/modules/tagfs/sysIO.py
new file mode 100644
index 0000000..71a37cb
--- /dev/null
+++ b/src/modules/tagfs/sysIO.py
@@ -0,0 +1,29 @@
+#
+# Copyright 2012 Markus Pielmeier
+#
+# This file is part of tagfs.
+#
+# tagfs is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# tagfs is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
+
+def createSystem():
+ return System(open = open)
+
+class System(object):
+ '''Abstraction layer for system access.
+
+ This class can be used to mock system access in tests.
+ '''
+
+ def __init__(self, open = None):
+ self.open = open
|
marook/tagfs
|
3fe72a56df0da237b695196931973a1d86b7e6a1
|
added freebase section to README
|
diff --git a/README b/README
index 024f7c8..d8e7baf 100644
--- a/README
+++ b/README
@@ -1,164 +1,177 @@
tagfs - tag file system
1) Introduction
2) Requirements
3) Installation
4) Tagging Files
5) Usage
6) Configuration
6.1) Options
6.1.1) tagFileName
6.1.2) enableValueFilters
6.1.3) enableRootItemLinks
-7) Further Reading
-8) Contact
+7) Freebase Integration
+8) Further Reading
+9) Contact
---------------------------------------------------------------------
Introduction
tagfs is used to organize your files using tags.
This document contains basic usage instructions for users. To develop or debug
tagfs see the README.dev file.
---------------------------------------------------------------------
Requirements
* python 2.5, 2.6, 2.7
* Linux kernel with fuse enabled
* python-fuse installed
---------------------------------------------------------------------
Installation
To install tagfs into your home directory type the following:
$ python setup.py test e2e_test install --home ~/.local
If you haven't already extended your local python path then add the following
to your environment configuration script. For example to your ~/.bashrc:
$ export PYTHONPATH=~/.local/lib/python:$PYTHONPATH
You may also need to add ~/.local/bin to your PATH environment variable:
$ export PATH=~/.local/bin:$PATH
---------------------------------------------------------------------
Tagging Files
Before you can filter anything using tagfs you need to tag your items. An item
is a directory which contains a file called .tag. All items must be below one
directory.
Let's create a simple item structure.
First we create the root directory for all items:
$ mkdir items
Then we create our first item:
$ mkdir items/Ted
We tag the 'Ted' item as movie:
$ echo movie >> items/Ted/.tag
We also tag 'Ted' as genre comedy:
$ echo 'genre: comedy' >> items/Ted/.tag
Then we add a second item:
$ mkdir items/banana
$ echo fruit >> items/banana/.tag
$ echo 'genre: delicious' >> items/banana/.tag
Modifying .tag files using echo, grep, sed may be a little hard sometimes.
There are some convenience scripts available through the tagfs-utils project.
See https://github.com/marook/tagfs-utils for details.
---------------------------------------------------------------------
Usage
After installation tagfs can be started the following way.
Mount a tagged directory:
$ tagfs -i /path/to/my/items/directory /path/to/my/mount/point
Unmount a tagged directory:
$ fusermount -u /path/to/my/mount/point
Right now tagfs reads the taggings only when it's getting mounted. So if you
modify the tags after mounting you will not see any changes in the tagfs file
system.
In general tagfs will try to reduce the number of filter directories below the
virtual file system. That's why you may not see some filters which would not
reduce the number of selected items.
---------------------------------------------------------------------
Configuration
tagfs can be configured through configuration files. Configuration files are
searched in different locations by tagfs. The following locations are used.
Locations with higher priority come first:
- <items directory>/.tagfs/tagfs.conf
- ~/.tagfs/tagfs.conf
- /etc/tagfs/tagfs.conf
Right now the following configuration options are supported.
---------------------------------------------------------------------
Configuration - Options - tagFileName
Through this option the name of the parsed tag files can be specified. The
default value is '.tag'.
Example:
[global]
tagFileName = ABOUT
---------------------------------------------------------------------
Configuration - Options - enableValueFilters
You can enable or disable value filters. If you enable value filters you will
see filter directories for each tag value. For value filters the tag's
context can be anyone. The default value is 'false'.
Example:
[global]
enableValueFilters = true
---------------------------------------------------------------------
Configuration - Options - enableRootItemLinks
To show links to all items in the tagfs '/' directory enable this option. The
default value is 'false'.
Example:
[global]
enableRootItemLinks = true
+---------------------------------------------------------------------
+Freebase Integration
+
+Freebase is an open graph of people, places and things. tagfs allows you to
+extend your own taggings with data directly from the freebase graph.
+
+In order to use freebase you need to install the freebase-python bindings. They
+are available via https://code.google.com/p/freebase-python/
+
+TODO howto query freebase
+
+
---------------------------------------------------------------------
Further Reading
Using a file system for my bank account (Markus Pielmeier)
http://pielmeier.blogspot.com/2010/08/using-file-system-for-my-bank-account.html
---------------------------------------------------------------------
Contact
* homepage: http://wiki.github.com/marook/tagfs
* user group: http://groups.google.com/group/tagfs
* author: Markus Pielmeier <[email protected]>
|
marook/tagfs
|
11fab09c054f36238b75629d26d28bd29614b5aa
|
added ~/.local/bin to PATH hint
|
diff --git a/README b/README
index bf94d90..024f7c8 100644
--- a/README
+++ b/README
@@ -1,160 +1,164 @@
tagfs - tag file system
1) Introduction
2) Requirements
3) Installation
4) Tagging Files
5) Usage
6) Configuration
6.1) Options
6.1.1) tagFileName
6.1.2) enableValueFilters
6.1.3) enableRootItemLinks
7) Further Reading
8) Contact
---------------------------------------------------------------------
Introduction
tagfs is used to organize your files using tags.
This document contains basic usage instructions for users. To develop or debug
tagfs see the README.dev file.
---------------------------------------------------------------------
Requirements
* python 2.5, 2.6, 2.7
* Linux kernel with fuse enabled
* python-fuse installed
---------------------------------------------------------------------
Installation
To install tagfs into your home directory type the following:
$ python setup.py test e2e_test install --home ~/.local
If you haven't already extended your local python path then add the following
to your environment configuration script. For example to your ~/.bashrc:
-export PYTHONPATH=~/.local/lib/python:$PYTHONPATH
+$ export PYTHONPATH=~/.local/lib/python:$PYTHONPATH
+
+You may also need to add ~/.local/bin to your PATH environment variable:
+
+$ export PATH=~/.local/bin:$PATH
---------------------------------------------------------------------
Tagging Files
Before you can filter anything using tagfs you need to tag your items. An item
is a directory which contains a file called .tag. All items must be below one
directory.
Let's create a simple item structure.
First we create the root directory for all items:
$ mkdir items
Then we create our first item:
$ mkdir items/Ted
We tag the 'Ted' item as movie:
$ echo movie >> items/Ted/.tag
We also tag 'Ted' as genre comedy:
$ echo 'genre: comedy' >> items/Ted/.tag
Then we add a second item:
$ mkdir items/banana
$ echo fruit >> items/banana/.tag
$ echo 'genre: delicious' >> items/banana/.tag
Modifying .tag files using echo, grep, sed may be a little hard sometimes.
There are some convenience scripts available through the tagfs-utils project.
See https://github.com/marook/tagfs-utils for details.
---------------------------------------------------------------------
Usage
After installation tagfs can be started the following way.
Mount a tagged directory:
$ tagfs -i /path/to/my/items/directory /path/to/my/mount/point
Unmount a tagged directory:
$ fusermount -u /path/to/my/mount/point
Right now tagfs reads the taggings only when it's getting mounted. So if you
modify the tags after mounting you will not see any changes in the tagfs file
system.
In general tagfs will try to reduce the number of filter directories below the
virtual file system. That's why you may not see some filters which would not
reduce the number of selected items.
---------------------------------------------------------------------
Configuration
tagfs can be configured through configuration files. Configuration files are
searched in different locations by tagfs. The following locations are used.
Locations with higher priority come first:
- <items directory>/.tagfs/tagfs.conf
- ~/.tagfs/tagfs.conf
- /etc/tagfs/tagfs.conf
Right now the following configuration options are supported.
---------------------------------------------------------------------
Configuration - Options - tagFileName
Through this option the name of the parsed tag files can be specified. The
default value is '.tag'.
Example:
[global]
tagFileName = ABOUT
---------------------------------------------------------------------
Configuration - Options - enableValueFilters
You can enable or disable value filters. If you enable value filters you will
see filter directories for each tag value. For value filters the tag's
context can be anyone. The default value is 'false'.
Example:
[global]
enableValueFilters = true
---------------------------------------------------------------------
Configuration - Options - enableRootItemLinks
To show links to all items in the tagfs '/' directory enable this option. The
default value is 'false'.
Example:
[global]
enableRootItemLinks = true
---------------------------------------------------------------------
Further Reading
Using a file system for my bank account (Markus Pielmeier)
http://pielmeier.blogspot.com/2010/08/using-file-system-for-my-bank-account.html
---------------------------------------------------------------------
Contact
* homepage: http://wiki.github.com/marook/tagfs
* user group: http://groups.google.com/group/tagfs
* author: Markus Pielmeier <[email protected]>
|
marook/tagfs
|
d8ca342f8a463de6e124375269546f194753cffa
|
added tagging documentation
|
diff --git a/README b/README
index a1397c3..bf94d90 100644
--- a/README
+++ b/README
@@ -1,120 +1,160 @@
tagfs - tag file system
1) Introduction
2) Requirements
3) Installation
-4) Usage
-5) Configuration
-5.1) Options
-5.1.1) tagFileName
-5.1.2) enableValueFilters
-5.1.3) enableRootItemLinks
-6) Further Reading
-7) Contact
+4) Tagging Files
+5) Usage
+6) Configuration
+6.1) Options
+6.1.1) tagFileName
+6.1.2) enableValueFilters
+6.1.3) enableRootItemLinks
+7) Further Reading
+8) Contact
---------------------------------------------------------------------
Introduction
tagfs is used to organize your files using tags.
This document contains basic usage instructions for users. To develop or debug
tagfs see the README.dev file.
---------------------------------------------------------------------
Requirements
* python 2.5, 2.6, 2.7
* Linux kernel with fuse enabled
* python-fuse installed
---------------------------------------------------------------------
Installation
To install tagfs into your home directory type the following:
$ python setup.py test e2e_test install --home ~/.local
If you haven't already extended your local python path then add the following
to your environment configuration script. For example to your ~/.bashrc:
export PYTHONPATH=~/.local/lib/python:$PYTHONPATH
+---------------------------------------------------------------------
+Tagging Files
+
+Before you can filter anything using tagfs you need to tag your items. An item
+is a directory which contains a file called .tag. All items must be below one
+directory.
+
+Let's create a simple item structure.
+
+First we create the root directory for all items:
+$ mkdir items
+
+Then we create our first item:
+$ mkdir items/Ted
+
+We tag the 'Ted' item as movie:
+$ echo movie >> items/Ted/.tag
+
+We also tag 'Ted' as genre comedy:
+$ echo 'genre: comedy' >> items/Ted/.tag
+
+Then we add a second item:
+$ mkdir items/banana
+$ echo fruit >> items/banana/.tag
+$ echo 'genre: delicious' >> items/banana/.tag
+
+Modifying .tag files using echo, grep, sed may be a little hard sometimes.
+There are some convenience scripts available through the tagfs-utils project.
+See https://github.com/marook/tagfs-utils for details.
+
+
---------------------------------------------------------------------
Usage
After installation tagfs can be started the following way.
Mount a tagged directory:
$ tagfs -i /path/to/my/items/directory /path/to/my/mount/point
Unmount a tagged directory:
$ fusermount -u /path/to/my/mount/point
+Right now tagfs reads the taggings only when it's getting mounted. So if you
+modify the tags after mounting you will not see any changes in the tagfs file
+system.
+
+In general tagfs will try to reduce the number of filter directories below the
+virtual file system. That's why you may not see some filters which would not
+reduce the number of selected items.
+
---------------------------------------------------------------------
Configuration
tagfs can be configured through configuration files. Configuration files are
searched in different locations by tagfs. The following locations are used.
Locations with higher priority come first:
- <items directory>/.tagfs/tagfs.conf
- ~/.tagfs/tagfs.conf
- /etc/tagfs/tagfs.conf
Right now the following configuration options are supported.
---------------------------------------------------------------------
Configuration - Options - tagFileName
Through this option the name of the parsed tag files can be specified. The
default value is '.tag'.
Example:
[global]
tagFileName = ABOUT
---------------------------------------------------------------------
Configuration - Options - enableValueFilters
You can enable or disable value filters. If you enable value filters you will
see filter directories for each tag value. For value filters the tag's
context can be anyone. The default value is 'false'.
Example:
[global]
enableValueFilters = true
---------------------------------------------------------------------
Configuration - Options - enableRootItemLinks
To show links to all items in the tagfs '/' directory enable this option. The
default value is 'false'.
Example:
[global]
enableRootItemLinks = true
---------------------------------------------------------------------
Further Reading
Using a file system for my bank account (Markus Pielmeier)
http://pielmeier.blogspot.com/2010/08/using-file-system-for-my-bank-account.html
---------------------------------------------------------------------
Contact
* homepage: http://wiki.github.com/marook/tagfs
* user group: http://groups.google.com/group/tagfs
* author: Markus Pielmeier <[email protected]>
|
marook/tagfs
|
6a29ab9e9883ebcef603727775055afdd132ff0d
|
added backlog
|
diff --git a/backlog b/backlog
new file mode 100644
index 0000000..4e273ae
--- /dev/null
+++ b/backlog
@@ -0,0 +1 @@
+* add 'howto tag files' to documentation
|
marook/tagfs
|
b34f2c2bccf371dfc9b0e4c924b5f479eed5fafa
|
added .unset filter for context filter directories
|
diff --git a/src/modules/tagfs/node_filter_context.py b/src/modules/tagfs/node_filter_context.py
index 70f5703..3f67849 100644
--- a/src/modules/tagfs/node_filter_context.py
+++ b/src/modules/tagfs/node_filter_context.py
@@ -1,102 +1,123 @@
#
# Copyright 2011 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from cache import cache
from node import Stat, ItemLinkNode, DirectoryNode
from node_filter import FilterDirectoryNode
from node_untagged_items import UntaggedItemsDirectoryNode
class ContextValueFilterDirectoryNode(FilterDirectoryNode):
def __init__(self, itemAccess, config, parentNode, context, value):
super(ContextValueFilterDirectoryNode, self).__init__(itemAccess, config)
self.parentNode = parentNode
self.context = context
self.value = value
@property
def name(self):
return self.value
@property
def items(self):
for item in self.parentNode.items:
if not item.isTaggedWithContextValue(self.context, self.value):
continue
yield item
+class UnsetContextFilterDirectoryNode(FilterDirectoryNode):
+
+ def __init__(self, itemAccess, config, parentNode, context):
+ super(UnsetContextFilterDirectoryNode, self).__init__(itemAccess, config)
+ self.parentNode = parentNode
+ self.context = context
+
+ @property
+ def name(self):
+ return '.unset'
+
+ @property
+ def items(self):
+ for item in self.parentNode.parentNode.items:
+ if item.isTaggedWithContext(self.context):
+ continue
+
+ yield item
+
class ContextValueListDirectoryNode(DirectoryNode):
def __init__(self, itemAccess, config, parentNode, context):
self.itemAccess = itemAccess
self.config = config
self.parentNode = parentNode
self.context = context
@property
def name(self):
return self.context
@property
def attr(self):
s = super(ContextValueListDirectoryNode, self).attr
# TODO why nlink == 2?
s.st_nlink = 2
# TODO write test case which tests st_mtime == itemAccess.parseTime
s.st_mtime = self.itemAccess.parseTime
s.st_ctime = s.st_mtime
s.st_atime = s.st_mtime
return s
@property
def items(self):
for item in self.parentNode.items:
if not item.isTaggedWithContext(self.context):
continue
yield item
@property
def contextValues(self):
values = set()
for item in self.parentNode.items:
for tag in item.getTagsByContext(self.context):
values.add(tag.value)
return values
@property
def _entries(self):
+ yield UnsetContextFilterDirectoryNode(self.itemAccess, self.config, self, self.context)
+
for value in self.contextValues:
yield ContextValueFilterDirectoryNode(self.itemAccess, self.config, self, self.context, value)
def addsValue(self, parentItems):
if(super(ContextValueListDirectoryNode, self).addsValue(parentItems)):
return True
for e in self._entries:
if(e.addsValue(parentItems)):
return True
return False
diff --git a/test/e2e/contextValueFilter/assert b/test/e2e/contextValueFilter/assert
index a952aa8..6336bfa 100755
--- a/test/e2e/contextValueFilter/assert
+++ b/test/e2e/contextValueFilter/assert
@@ -1,5 +1,7 @@
#!/bin/bash
assertLink "$TEST_MOUNT_DIR/category/fruit/banana"
assertLink "$TEST_MOUNT_DIR/category/vegetable/carrot"
+
+assertLink "$TEST_MOUNT_DIR/category/.unset/car"
diff --git a/test/e2e/contextValueFilter/items/car/.tag b/test/e2e/contextValueFilter/items/car/.tag
new file mode 100644
index 0000000..e69de29
|
marook/tagfs
|
8ea74a0f80a6a84337f20bdeaf151a6ab8758b45
|
added .any_context filter directory
|
diff --git a/src/modules/tagfs/item_access.py b/src/modules/tagfs/item_access.py
index eeed313..7e05447 100644
--- a/src/modules/tagfs/item_access.py
+++ b/src/modules/tagfs/item_access.py
@@ -1,292 +1,297 @@
#
# Copyright 2009 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import time
import traceback
from cache import cache
class Tag(object):
def __init__(self, value, context = None):
if context == None:
self.context = None
else:
self.context = context.strip()
self.value = value.strip()
if not self.context == None and len(self.context) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
if len(self.value) == 0:
# we don't allow empty strings as they can't be represented as a
# directory very well
raise ValueError()
def __hash__(self):
return (self.context, self.value).__hash__()
def __eq__(self, other):
return self.value == other.value and self.context == other.context
def __repr__(self):
return '<Tag %s: %s>' % (self.context, self.value)
def parseTagsFromFile(tagFileName):
"""Parses the tags from the specified file.
@return: The parsed values are returned as a set containing Tag objects.
@see: Tag
"""
tags = set()
tagFile = open(tagFileName, 'r')
try:
for rawTag in tagFile.readlines():
rawTag = rawTag.strip()
try:
if len(rawTag) == 0:
continue
tagTuple = rawTag.split(':', 1)
if len(tagTuple) == 1:
tagContext = None
tagValue = tagTuple[0]
else:
tagContext = tagTuple[0]
tagValue = tagTuple[1]
tag = Tag(tagValue, context = tagContext)
tags.add(tag)
except:
logging.warning('Skipping tagging \'%s\' from file \'%s\' as it can\'t be parsed\n%s.' % (rawTag, tagFileName, traceback.format_exc()))
finally:
tagFile.close()
return tags
class Item(object):
def __init__(self, name, itemAccess):
self.name = name
self.itemAccess = itemAccess
# TODO register at file system to receive tag file change events.
@property
@cache
def itemDirectory(self):
return os.path.join(self.itemAccess.dataDirectory, self.name)
@property
@cache
def _tagFileName(self):
"""Returns the name of the tag file for this item.
"""
itemDirectory = self.itemDirectory
return os.path.join(itemDirectory, self.itemAccess.tagFileName)
def __parseTags(self):
tagFileName = self._tagFileName
if not os.path.exists(tagFileName):
return None
return parseTagsFromFile(tagFileName)
@property
@cache
def tagsCreationTime(self):
tagFileName = self._tagFileName
if not os.path.exists(tagFileName):
return None
return os.path.getctime(self._tagFileName)
@property
@cache
def tagsModificationTime(self):
"""Returns the last time when the tags have been modified.
"""
tagFileName = self._tagFileName
if not os.path.exists(tagFileName):
return None
return os.path.getmtime(tagFileName)
@property
@cache
def tags(self):
"""Returns the tags as a list for this item.
"""
return self.__parseTags()
+ @property
+ def values(self):
+ for t in self.tags:
+ yield t.value
+
def getTagsByContext(self, context):
for t in self.tags:
if context != t.context:
continue
yield t
def isTaggedWithContextValue(self, context, value):
for t in self.getTagsByContext(context):
if value == t.value:
return True
return False
def isTaggedWithContext(self, context):
# TODO don't create whole list... just check wheather list is empty
return (len([c for c in self.getTagsByContext(context)]) > 0)
def isTaggedWithValue(self, value):
- for t in self.tags:
- if value == t.value:
+ for v in self.values:
+ if value == v:
return True
return False
@property
@cache
def tagged(self):
return os.path.exists(self._tagFileName)
def __repr__(self):
return '<Item %s>' % self.name
class ItemAccess(object):
"""This is the access point to the Items.
"""
def __init__(self, dataDirectory, tagFileName):
self.dataDirectory = dataDirectory
self.tagFileName = tagFileName
self.__items = None
self.__tags = None
self.__taggedItems = None
self.__untaggedItems = None
self.parseTime = 0
def __parseItems(self):
items = {}
logging.debug('Start parsing items from dir: %s', self.dataDirectory)
for itemName in os.listdir(self.dataDirectory):
if itemName == '.tagfs':
# skip directory with configuration
continue
try:
item = Item(itemName, self)
items[itemName] = item
except IOError, (error, strerror):
logging.error('Can \'t read tags for item %s: %s',
itemName,
strerror)
logging.debug('Found %s items', len(items))
self.parseTime = time.time()
return items
@property
@cache
def items(self):
return self.__parseItems()
@property
@cache
def tags(self):
tags = set()
for item in self.items.itervalues():
if not item.tagged:
continue
tags = tags | item.tags
return tags
@property
@cache
def taggedItems(self):
return set([item for item in self.items.itervalues() if item.tagged])
@property
@cache
def untaggedItems(self):
return set([item for item in self.items.itervalues() if not item.tagged])
def getItemDirectory(self, item):
return os.path.join(self.dataDirectory, item)
def contextTags(self, context):
contextTags = set()
for tag in self.tags:
if tag.context == context:
contextTags.add(tag)
return contextTags
@property
@cache
def contexts(self):
contexts = set()
for tag in self.tags:
if tag.context == None:
continue
contexts.add(tag.context)
return contexts
@property
@cache
def values(self):
values = set()
for tag in self.tags:
values.add(tag.value)
return values
def __str__(self):
return '[' + ', '.join([field + ': ' + str(self.__dict__[field]) for field in ['dataDirectory', 'tagFileName']]) + ']'
diff --git a/src/modules/tagfs/node_filter.py b/src/modules/tagfs/node_filter.py
index 4e0ed85..894f441 100644
--- a/src/modules/tagfs/node_filter.py
+++ b/src/modules/tagfs/node_filter.py
@@ -1,95 +1,98 @@
#
# Copyright 2011 Markus Pielmeier
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from cache import cache
from node import Stat, ItemLinkNode, DirectoryNode
from node_export import ExportDirectoryNode
class FilterDirectoryNode(DirectoryNode):
def __init__(self, itemAccess, config):
self.itemAccess = itemAccess
self.config = config
@property
def attr(self):
s = super(FilterDirectoryNode, self).attr
# TODO why nlink == 2?
s.st_nlink = 2
# TODO write test case which tests st_mtime == itemAccess.parseTime
s.st_mtime = self.itemAccess.parseTime
s.st_ctime = s.st_mtime
s.st_atime = s.st_mtime
return s
@property
def contexts(self):
c = set()
for item in self.items:
for t in item.tags:
context = t.context
if context is None:
continue
c.add(t.context)
return c
@property
def _enableItemLinks(self):
return True
@property
def _entries(self):
# the import is not global because we want to prevent a cyclic
# dependency (ugly but works)
from node_filter_context import ContextValueListDirectoryNode
from node_filter_value import ValueFilterDirectoryNode
+ from node_filter_any_context import AnyContextValueListDirectoryNode
yield ExportDirectoryNode(self.itemAccess, self)
+ yield AnyContextValueListDirectoryNode(self.itemAccess, self.config, self)
+
if(self.config.enableValueFilters):
for value in self.itemAccess.values:
yield ValueFilterDirectoryNode(self.itemAccess, self.config, self, value)
for context in self.contexts:
yield ContextValueListDirectoryNode(self.itemAccess, self.config, self, context)
if(self._enableItemLinks):
for item in self.items:
yield ItemLinkNode(item)
def addsValue(self, parentItems):
itemsLen = len(list(self.items))
if(itemsLen == 0):
return False
# TODO we should not compare the lengths but whether the child and
# parent items are different
parentItemsLen = len(list(parentItems))
return itemsLen != parentItemsLen
def _addsValue(self, child):
return child.addsValue(self.items)
diff --git a/src/modules/tagfs/node_filter_any_context.py b/src/modules/tagfs/node_filter_any_context.py
new file mode 100644
index 0000000..eb708c3
--- /dev/null
+++ b/src/modules/tagfs/node_filter_any_context.py
@@ -0,0 +1,96 @@
+#
+# Copyright 2012 Markus Pielmeier
+#
+# This file is part of tagfs.
+#
+# tagfs is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# tagfs is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from cache import cache
+from node import Stat, ItemLinkNode, DirectoryNode
+from node_filter import FilterDirectoryNode
+from node_untagged_items import UntaggedItemsDirectoryNode
+
+class AnyContextValueFilterDirectoryNode(FilterDirectoryNode):
+
+ def __init__(self, itemAccess, config, parentNode, value):
+ super(AnyContextValueFilterDirectoryNode, self).__init__(itemAccess, config)
+ self.parentNode = parentNode
+ self.value = value
+
+ @property
+ def name(self):
+ return self.value
+
+ @property
+ def items(self):
+ for item in self.parentNode.items:
+ if not item.isTaggedWithValue(self.value):
+ continue
+
+ yield item
+
+class AnyContextValueListDirectoryNode(DirectoryNode):
+
+ def __init__(self, itemAccess, config, parentNode):
+ self.itemAccess = itemAccess
+ self.config = config
+ self.parentNode = parentNode
+
+ @property
+ def name(self):
+ return '.any_context'
+
+ @property
+ def attr(self):
+ s = super(AnyContextValueListDirectoryNode, self).attr
+
+ # TODO why nlink == 2?
+ s.st_nlink = 2
+
+ # TODO write test case which tests st_mtime == itemAccess.parseTime
+ s.st_mtime = self.itemAccess.parseTime
+ s.st_ctime = s.st_mtime
+ s.st_atime = s.st_mtime
+
+ return s
+
+ @property
+ def items(self):
+ return self.parentNode.items
+
+ @property
+ def contextValues(self):
+ values = set()
+
+ for item in self.parentNode.items:
+ for v in item.values:
+ values.add(v)
+
+ return values
+
+ @property
+ def _entries(self):
+ for value in self.contextValues:
+ yield AnyContextValueFilterDirectoryNode(self.itemAccess, self.config, self, value)
+
+ def addsValue(self, parentItems):
+ if(super(AnyContextValueListDirectoryNode, self).addsValue(parentItems)):
+ return True
+
+ for e in self._entries:
+ if(e.addsValue(parentItems)):
+ return True
+
+ return False
diff --git a/test/e2e/anyContextValueFilter/assert b/test/e2e/anyContextValueFilter/assert
new file mode 100755
index 0000000..2f49c26
--- /dev/null
+++ b/test/e2e/anyContextValueFilter/assert
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+assertLink "$TEST_MOUNT_DIR/.any_context/fruit/banana"
+
+assertLink "$TEST_MOUNT_DIR/.any_context/vegetable/carrot"
diff --git a/test/e2e/anyContextValueFilter/items/banana/.tag b/test/e2e/anyContextValueFilter/items/banana/.tag
new file mode 100644
index 0000000..c7f47d1
--- /dev/null
+++ b/test/e2e/anyContextValueFilter/items/banana/.tag
@@ -0,0 +1 @@
+category: fruit
diff --git a/test/e2e/anyContextValueFilter/items/carrot/.tag b/test/e2e/anyContextValueFilter/items/carrot/.tag
new file mode 100644
index 0000000..6e769b9
--- /dev/null
+++ b/test/e2e/anyContextValueFilter/items/carrot/.tag
@@ -0,0 +1 @@
+category: vegetable
|
marook/tagfs
|
64333f0232032e0e17704e22f4ba15705c3635f5
|
escaped compared file names in e2e tests
|
diff --git a/bin/e2eAssertSandbox.sh b/bin/e2eAssertSandbox.sh
index 0f3172c..bd92c0a 100755
--- a/bin/e2eAssertSandbox.sh
+++ b/bin/e2eAssertSandbox.sh
@@ -1,26 +1,26 @@
#!/bin/bash
set -e
ASSERT_BIN=$1
fail() {
echo "TEST FAILED: $1" >&2
exit 1
}
assertLink(){
PATH=$1
if [ ! -L "$PATH" ]
then
fail "Expected path to be link: $PATH"
fi
}
assertEqualContent(){
- cmp $1 $2 > /dev/null || fail "File content is not equal: $1 and $2 ($DIFF)"
+ cmp "$1" "$2" > /dev/null || fail "File content is not equal: $1 and $2 ($DIFF)"
}
cd `dirname "$ASSERT_BIN"`
. $ASSERT_BIN
|
marook/tagfs
|
f90ab15d4efb4cf2e444bff9a323e16adf573eed
|
added e2e test for empty csv export
|
diff --git a/bin/e2eAssertSandbox.sh b/bin/e2eAssertSandbox.sh
index 979cdb5..0f3172c 100755
--- a/bin/e2eAssertSandbox.sh
+++ b/bin/e2eAssertSandbox.sh
@@ -1,19 +1,26 @@
#!/bin/bash
+set -e
+
ASSERT_BIN=$1
fail() {
- echo "$1" >&2
+ echo "TEST FAILED: $1" >&2
exit 1
}
assertLink(){
PATH=$1
if [ ! -L "$PATH" ]
then
fail "Expected path to be link: $PATH"
fi
}
+assertEqualContent(){
+ cmp $1 $2 > /dev/null || fail "File content is not equal: $1 and $2 ($DIFF)"
+}
+
+cd `dirname "$ASSERT_BIN"`
. $ASSERT_BIN
diff --git a/test/e2e/emptyExport/assert b/test/e2e/emptyExport/assert
new file mode 100755
index 0000000..2771f06
--- /dev/null
+++ b/test/e2e/emptyExport/assert
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+assertEqualContent "$TEST_MOUNT_DIR/.export/export.csv" "export.csv"
diff --git a/test/e2e/emptyExport/export.csv b/test/e2e/emptyExport/export.csv
new file mode 100644
index 0000000..7cc35dd
--- /dev/null
+++ b/test/e2e/emptyExport/export.csv
@@ -0,0 +1 @@
+"name"
diff --git a/test/e2e/emptyExport/items/empty_dir b/test/e2e/emptyExport/items/empty_dir
new file mode 100644
index 0000000..e69de29
|
marook/tagfs
|
6bc916bf92dd4c0d89cde49565fa60327f1877c4
|
added context tagging test case
|
diff --git a/test/e2e/contextValueFilter/assert b/test/e2e/contextValueFilter/assert
new file mode 100755
index 0000000..a952aa8
--- /dev/null
+++ b/test/e2e/contextValueFilter/assert
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+assertLink "$TEST_MOUNT_DIR/category/fruit/banana"
+
+assertLink "$TEST_MOUNT_DIR/category/vegetable/carrot"
diff --git a/test/e2e/contextValueFilter/items/banana/.tag b/test/e2e/contextValueFilter/items/banana/.tag
new file mode 100644
index 0000000..c7f47d1
--- /dev/null
+++ b/test/e2e/contextValueFilter/items/banana/.tag
@@ -0,0 +1 @@
+category: fruit
diff --git a/test/e2e/contextValueFilter/items/carrot/.tag b/test/e2e/contextValueFilter/items/carrot/.tag
new file mode 100644
index 0000000..6e769b9
--- /dev/null
+++ b/test/e2e/contextValueFilter/items/carrot/.tag
@@ -0,0 +1 @@
+category: vegetable
|
marook/tagfs
|
07c24ff8c6117bce10d18126bc63023aeae7d915
|
added docs for untaggedItems test case
|
diff --git a/test/e2e/untaggedItems/assert b/test/e2e/untaggedItems/assert
index 24004fd..33bbafb 100755
--- a/test/e2e/untaggedItems/assert
+++ b/test/e2e/untaggedItems/assert
@@ -1,3 +1,7 @@
#!/bin/bash
+#
+# This makes sure that items without .tag file are linked within
+# untagged items.
+#
assertLink "$TEST_MOUNT_DIR/.untagged/dir without tags"
|
marook/tagfs
|
6cba48db7303b30964e1ffa4bbf3b28c7fa5121a
|
added some documentation about end-to-end tests
|
diff --git a/README.dev b/README.dev
index 6853992..8f88b97 100644
--- a/README.dev
+++ b/README.dev
@@ -1,79 +1,95 @@
tagfs - tag file system
developer readme
1) Logging
2) Profiling
3) Tracing
4) Distribution
4.1) tar Distribution
5) Tests
6) Code Coverage
+7) End-To-End Tests
---------------------------------------------------------------------
Logging
You can enable logging by setting a debug environment variable before you
launch tagfs:
$ export DEBUG=1
tagfs will log to the console and the file /tmp/tagfs.log
---------------------------------------------------------------------
Profiling
You can enable profiling by setting a profile environment variable before you
launch tagfs:
$ export PROFILE=1
After unmounting your tagfs file system a profile file will be written. The
profile file will be written to the current directory. The profile file will
be named 'tagfs.profile'.
---------------------------------------------------------------------
Tracing
Tracing is done via the log output. There is a utility script to analyze the
log files. To analyze a log file execute the following
$ util/trace_logfiles.py /tmp/tagfs.log
The tracing script will output some statistics.
---------------------------------------------------------------------
tar Distribution
The tagfs project contains scripts for creating source distribution packages.
To create a tar distribution package you execute the following:
$ make distsnapshot
The make call will create an archive within the target directory. The created
tar file is used for tagfs source distribution.
---------------------------------------------------------------------
Tests
You can execute the test cases via the setup.py script in the project's root
directory.
$ python setup.py test
+
---------------------------------------------------------------------
Code Coverage
The tagfs unit tests can be executed with code coverage measurement enabled.
setup.py will measure the code coverage if the coverage lib is installed.
The coverage lib is available here: http://nedbatchelder.com/code/coverage
If you're a debian user you can try:
$ apt-get install python-coverage
The code coverage will be written below the reports directory after executing
the test cases:
$ python setup.py test
+
+
+---------------------------------------------------------------------
+End-To-End Tests
+
+tagfs contains some end-to-end tests. The end-to-end tests first mount an
+items directory and afterwards execute a shell script which can assert certain
+conditions in the mounted tagfs.
+
+The end-to-end tests can be run via the setup.py:
+
+$ python setup.py e2e_test
+
+The end-to-end tests are located below the test/e2e directory.
|
marook/tagfs
|
4efe60c207b3f23311a83c41a30a8fab5a1c71c2
|
added support for end-to-end tests
|
diff --git a/bin/e2eAssertSandbox.sh b/bin/e2eAssertSandbox.sh
new file mode 100755
index 0000000..979cdb5
--- /dev/null
+++ b/bin/e2eAssertSandbox.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+ASSERT_BIN=$1
+
+fail() {
+ echo "$1" >&2
+ exit 1
+}
+
+assertLink(){
+ PATH=$1
+
+ if [ ! -L "$PATH" ]
+ then
+ fail "Expected path to be link: $PATH"
+ fi
+}
+
+. $ASSERT_BIN
diff --git a/bin/runEndToEndTest.sh b/bin/runEndToEndTest.sh
new file mode 100755
index 0000000..1d6380d
--- /dev/null
+++ b/bin/runEndToEndTest.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+set -e
+
+TEST_DIR=$1
+
+fail() {
+ echo "$1" >&2
+ exit 1
+}
+
+cleanupTagFS(){
+ fusermount -u $TEST_MOUNT_DIR
+ rmdir $TEST_MOUNT_DIR
+}
+
+if [ ! -d "$TEST_DIR" ]
+then
+ fail "TEST_DIR is not a directory: $TEST_DIR"
+fi
+
+TEST_NAME=`basename $TEST_DIR`
+
+echo ''
+echo '======================================================'
+echo " Executing end-to-end test $TEST_NAME"
+
+PYTHON=python
+
+BIN_DIR=`dirname "$0"`
+PROJECT_DIR=$BIN_DIR/..
+PYMODDIR=$PROJECT_DIR/src/modules
+export PYTHONPATH=$PYMODDIR:$PYTHONPATH
+
+export TEST_MOUNT_DIR=`mktemp -d --tmpdir tagfs_e2e.$TEST_NAME.XXXXXXXXXX`
+
+echo "Using mount $TEST_MOUNT_DIR"
+
+$PYTHON $PROJECT_DIR/src/bin/tagfs -i $TEST_DIR/items $TEST_MOUNT_DIR
+
+trap cleanupTagFS EXIT
+
+echo 'Asserting mount'
+
+$BIN_DIR/e2eAssertSandbox.sh $TEST_DIR/assert
+
+echo "Success"
diff --git a/setup.py b/setup.py
index af7e1d6..a66dddd 100644
--- a/setup.py
+++ b/setup.py
@@ -1,264 +1,290 @@
#!/usr/bin/env python
#
# Copyright 2009 Peter Prohaska
#
# This file is part of tagfs.
#
# tagfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tagfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
#
from distutils.core import setup, Command
import sys
import os
from os.path import (
basename,
dirname,
abspath,
splitext,
join as pjoin
)
from glob import glob
from unittest import TestLoader, TextTestRunner
import re
import datetime
+from subprocess import call
projectdir = dirname(abspath(__file__))
reportdir = pjoin(projectdir, 'reports')
srcdir = pjoin(projectdir, 'src')
bindir = pjoin(srcdir, 'bin')
moddir = pjoin(srcdir, 'modules')
testdir = pjoin(srcdir, 'test')
+endToEndTestDir = pjoin(projectdir, 'test', 'e2e')
testdatadir = pjoin(projectdir, 'etc', 'test', 'events')
testmntdir = pjoin(projectdir, 'mnt')
assert os.path.isdir(srcdir)
assert os.path.isdir(bindir)
assert os.path.isdir(moddir)
assert os.path.isdir(testdir)
assert os.path.isdir(testdatadir)
class Report(object):
def __init__(self):
self.reportDateTime = datetime.datetime.utcnow()
self.reportDir = os.path.join(reportdir, self.reportDateTime.strftime('%Y-%m-%d_%H_%M_%S'))
# fails when dir already exists which is nice
os.makedirs(self.reportDir)
@property
def coverageReportFileName(self):
return os.path.join(self.reportDir, 'coverage.txt')
@property
def unitTestReportFileName(self):
return os.path.join(self.reportDir, 'tests.txt')
def sourceFiles():
yield os.path.join(bindir, 'tagfs')
sourceFilePattern = re.compile('^.*[.]py$')
for root, dirs, files in os.walk(moddir):
for f in files:
if(not sourceFilePattern.match(f)):
continue
if(f.startswith('.#')):
continue
yield os.path.join(root, f)
def fullSplit(p):
head, tail = os.path.split(p)
if(len(head) > 0):
for n in fullSplit(head):
yield n
yield tail
def testModules():
testFilePattern = re.compile('^(test.*)[.]py$', re.IGNORECASE)
for root, dirs, files in os.walk(testdir):
for f in files:
m = testFilePattern.match(f)
if(not m):
continue
relDir = os.path.relpath(root, testdir)
yield '.'.join([n for n in fullSplit(relDir)] + [m.group(1), ])
def printFile(fileName):
if(not os.path.exists(fileName)):
# TODO maybe we should not silently return?
return
with open(fileName, 'r') as f:
for line in f:
sys.stdout.write(line)
class TestFailException(Exception):
'''Indicates that at lease one of the unit tests has failed
'''
pass
class test(Command):
description = 'run tests'
user_options = []
def initialize_options(self):
self._cwd = os.getcwd()
self._verbosity = 2
def finalize_options(self): pass
def run(self):
report = Report()
tests = [m for m in testModules()]
print "..using:"
print " moddir:", moddir
print " testdir:", testdir
print " testdatadir:", testdatadir
print " testmntdir:", testmntdir
print " tests:", tests
print " sys.path:", sys.path
print
# insert project lookup paths at index 0 to make sure they are used
# over global libraries
sys.path.insert(0, moddir)
sys.path.insert(0, testdir)
# TODO try to import all test cases here. the TestLoader is throwing
# very confusing errors when imports can't be resolved.
# configure logging
# TODO not sure how to enable this... it's a bit complicate to enable
# logging only for 'make mt' and disable it then for
# 'python setup.py test'. 'python setup.py test' is such a gabber...
#if 'DEBUG' in os.environ:
# from tagfs import log_config
# log_config.setUpLogging()
if 'DEBUG' in os.environ:
import logging
logging.basicConfig(level = logging.DEBUG)
suite = TestLoader().loadTestsFromNames(tests)
try:
with open(report.unitTestReportFileName, 'w') as testResultsFile:
r = TextTestRunner(stream = testResultsFile, verbosity = self._verbosity)
def runTests():
result = r.run(suite)
if(not result.wasSuccessful()):
raise TestFailException()
try:
import coverage
c = coverage.coverage()
c.start()
runTests()
c.stop()
with open(report.coverageReportFileName, 'w') as reportFile:
c.report([f for f in sourceFiles()], file = reportFile)
except ImportError:
# TODO ImportErrors from runTests() may look like coverage is missing
print ''
print 'coverage module not found.'
print 'To view source coverage stats install http://nedbatchelder.com/code/coverage/'
print ''
runTests()
finally:
# TODO use two streams instead of printing files after writing
printFile(report.unitTestReportFileName)
printFile(report.coverageReportFileName)
+class EndToEndTestFailure(Exception):
+
+ def __init__(self, testPath):
+ super(EndToEndTestFailure, self).__init__('end-to-end test failed: %s' % testPath)
+
+class EndToEndTests(Command):
+ description = 'execute the end-to-end tests'
+ user_options = []
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def runTest(self, testPath):
+ if(not call(['bin/runEndToEndTest.sh', testPath]) is 0):
+ raise EndToEndTestFailure(testPath)
+
+ def run(self):
+ for endToEndDirName in os.listdir(endToEndTestDir):
+ self.runTest(os.path.join(endToEndTestDir, endToEndDirName))
+
# Overrides default clean (which cleans from build runs)
# This clean should probably be hooked into that somehow.
class clean_pyc(Command):
description = 'remove *.pyc files from source directory'
user_options = []
def initialize_options(self):
self._delete = []
for cwd, dirs, files in os.walk(projectdir):
self._delete.extend(
pjoin(cwd, f) for f in files if f.endswith('.pyc')
)
def finalize_options(self):
pass
def run(self):
for f in self._delete:
try:
os.unlink(f)
except OSError, e:
print "Strange '%s': %s" % (f, e)
# Could be a directory.
# Can we detect file in use errors or are they OSErrors
# as well?
# Shall we catch all?
setup(
cmdclass = {
'test': test,
'clean_pyc': clean_pyc,
+ 'e2e_test': EndToEndTests,
},
name = 'tagfs',
version = '0.1',
url = 'http://wiki.github.com/marook/tagfs',
description = '',
long_description = '',
author = 'Markus Pielmeier',
author_email = '[email protected]',
license = 'GPLv3',
download_url = 'http://github.com/marook/tagfs/downloads/tagfs_0.1-src.tar.bz2',
platforms = 'Linux',
requires = [],
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: System :: Filesystems'
],
data_files = [
(pjoin('share', 'doc', 'tagfs'), ['AUTHORS', 'COPYING', 'README'])
],
# TODO maybe we should include src/bin/*?
scripts = [pjoin(bindir, 'tagfs')],
packages = ['tagfs'],
package_dir = {'': moddir},
)
diff --git a/test/e2e/untaggedItems/assert b/test/e2e/untaggedItems/assert
new file mode 100755
index 0000000..24004fd
--- /dev/null
+++ b/test/e2e/untaggedItems/assert
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+assertLink "$TEST_MOUNT_DIR/.untagged/dir without tags"
diff --git a/test/e2e/untaggedItems/items/dir without tags/empty_dir b/test/e2e/untaggedItems/items/dir without tags/empty_dir
new file mode 100644
index 0000000..e69de29
|
marook/tagfs
|
8ff45af73e805709c466a65357912722a62c6dc4
|
added unit test for Tag class
|
diff --git a/src/test/tagfs_test_small/test_item_access_tag.py b/src/test/tagfs_test_small/test_item_access_tag.py
new file mode 100644
index 0000000..6c82215
--- /dev/null
+++ b/src/test/tagfs_test_small/test_item_access_tag.py
@@ -0,0 +1,36 @@
+#
+# Copyright 2012 Markus Pielmeier
+#
+# This file is part of tagfs.
+#
+# tagfs is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# tagfs is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with tagfs. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from unittest import TestCase
+
+from tagfs import item_access
+
+class TagTest(TestCase):
+
+ def testTagValueInfluencesHash(self):
+ self.assertTrue(item_access.Tag('a', None).__hash__() != item_access.Tag('b', None).__hash__())
+
+ def testTagContextInfluencesHash(self):
+ self.assertTrue(item_access.Tag('v', None).__hash__() != item_access.Tag('v', 'c').__hash__())
+
+ def testEqualTagsEqWhenContextNone(self):
+ self.assertTrue(item_access.Tag('t', None).__eq__(item_access.Tag('t', None)))
+
+ def testEqualTagsEqWhenContextStr(self):
+ self.assertTrue(item_access.Tag('t', 'c').__eq__(item_access.Tag('t', 'c')))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.