# === DAAISy-main/src/utils/translate/instantiate.py ===
#! /usr/bin/env python3
from collections import defaultdict
from . import build_model
from . import pddl_fd as pddl
from . import pddl_to_prolog
from . import timers
def get_fluent_facts(task, model):
fluent_predicates = set()
for action in task.actions:
for effect in action.effects:
fluent_predicates.add(effect.literal.predicate)
for axiom in task.axioms:
fluent_predicates.add(axiom.name)
return {fact for fact in model
if fact.predicate in fluent_predicates}
def get_objects_by_type(typed_objects, types):
result = defaultdict(list)
supertypes = {}
for type in types:
supertypes[type.name] = type.supertype_names
for obj in typed_objects:
result[obj.type_name].append(obj.name)
for type in supertypes[obj.type_name]:
result[type].append(obj.name)
return result
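def _demo_get_objects_by_type():
    # Hedged sketch, not part of the original module: minimal stand-ins for
    # pddl.Type and pddl.TypedObject showing that each object is indexed
    # under its own type and under all of its supertypes.
    class _Type:
        def __init__(self, name, supertype_names):
            self.name = name
            self.supertype_names = supertype_names
    class _Obj:
        def __init__(self, name, type_name):
            self.name = name
            self.type_name = type_name
    types = [_Type("object", []), _Type("truck", ["object"])]
    objects = [_Obj("t1", "truck"), _Obj("ball", "object")]
    result = get_objects_by_type(objects, types)
    assert result["truck"] == ["t1"]
    assert result["object"] == ["t1", "ball"]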
def instantiate(task, model):
relaxed_reachable = False
fluent_facts = get_fluent_facts(task, model)
init_facts = set()
init_assignments = {}
for element in task.init:
if isinstance(element, pddl.Assign):
init_assignments[element.fluent] = element.expression
else:
init_facts.add(element)
type_to_objects = get_objects_by_type(task.objects, task.types)
instantiated_actions = []
instantiated_axioms = []
reachable_action_parameters = defaultdict(list)
for atom in model:
if isinstance(atom.predicate, pddl.Action):
action = atom.predicate
parameters = action.parameters
inst_parameters = atom.args[:len(parameters)]
# Note: It's important that we use the action object
# itself as the key in reachable_action_parameters (rather
# than action.name) since we can have multiple different
# actions with the same name after normalization, and we
# want to distinguish their instantiations.
reachable_action_parameters[action].append(inst_parameters)
variable_mapping = {par.name: arg
for par, arg in zip(parameters, atom.args)}
inst_action = action.instantiate(
variable_mapping, init_facts, init_assignments,
fluent_facts, type_to_objects,
task.use_min_cost_metric)
if inst_action:
instantiated_actions.append(inst_action)
elif isinstance(atom.predicate, pddl.Axiom):
axiom = atom.predicate
variable_mapping = {par.name: arg
for par, arg in zip(axiom.parameters, atom.args)}
inst_axiom = axiom.instantiate(variable_mapping, init_facts, fluent_facts)
if inst_axiom:
instantiated_axioms.append(inst_axiom)
elif atom.predicate == "@goal-reachable":
relaxed_reachable = True
return (relaxed_reachable, fluent_facts, instantiated_actions,
sorted(instantiated_axioms), reachable_action_parameters)
def explore(task):
prog = pddl_to_prolog.translate(task)
model = build_model.compute_model(prog)
with timers.timing("Completing instantiation"):
return instantiate(task, model)
if __name__ == "__main__":
import pddl_parser
task = pddl_parser.open()
relaxed_reachable, atoms, actions, axioms, _ = explore(task)
print("goal relaxed reachable: %s" % relaxed_reachable)
print("%d atoms:" % len(atoms))
for atom in atoms:
print(" ", atom)
print()
print("%d actions:" % len(actions))
for action in actions:
action.dump()
print()
print()
print("%d axioms:" % len(axioms))
for axiom in axioms:
axiom.dump()
print()
# === DAAISy-main/src/utils/translate/normalize.py ===
#! /usr/bin/env python3
import copy
from . import pddl_fd as pddl
class ConditionProxy:
def clone_owner(self):
clone = copy.copy(self)
clone.owner = copy.copy(clone.owner)
return clone
class PreconditionProxy(ConditionProxy):
def __init__(self, action):
self.owner = action
self.condition = action.precondition
def set(self, new_condition):
self.owner.precondition = self.condition = new_condition
def register_owner(self, task):
task.actions.append(self.owner)
def delete_owner(self, task):
task.actions.remove(self.owner)
def build_rules(self, rules):
action = self.owner
rule_head = get_action_predicate(action)
rule_body = condition_to_rule_body(action.parameters, self.condition)
rules.append((rule_body, rule_head))
def get_type_map(self):
return self.owner.type_map
class EffectConditionProxy(ConditionProxy):
def __init__(self, action, effect):
self.action = action
self.owner = effect
self.condition = effect.condition
def set(self, new_condition):
self.owner.condition = self.condition = new_condition
def register_owner(self, task):
self.action.effects.append(self.owner)
def delete_owner(self, task):
self.action.effects.remove(self.owner)
def build_rules(self, rules):
effect = self.owner
rule_head = effect.literal
if not rule_head.negated:
rule_body = [get_action_predicate(self.action)]
rule_body += condition_to_rule_body([], self.condition)
rules.append((rule_body, rule_head))
def get_type_map(self):
return self.action.type_map
class AxiomConditionProxy(ConditionProxy):
def __init__(self, axiom):
self.owner = axiom
self.condition = axiom.condition
def set(self, new_condition):
self.owner.condition = self.condition = new_condition
def register_owner(self, task):
task.axioms.append(self.owner)
def delete_owner(self, task):
task.axioms.remove(self.owner)
def build_rules(self, rules):
axiom = self.owner
app_rule_head = get_axiom_predicate(axiom)
app_rule_body = condition_to_rule_body(axiom.parameters, self.condition)
rules.append((app_rule_body, app_rule_head))
params = axiom.parameters[:axiom.num_external_parameters]
eff_rule_head = pddl.Atom(axiom.name, [par.name for par in params])
eff_rule_body = [app_rule_head]
rules.append((eff_rule_body, eff_rule_head))
def get_type_map(self):
return self.owner.type_map
class GoalConditionProxy(ConditionProxy):
def __init__(self, task):
self.owner = task
self.condition = task.goal
def set(self, new_condition):
self.owner.goal = self.condition = new_condition
def register_owner(self, task):
# this assertion should never trigger, because disjunctive
# goals are now implemented with axioms
# (see substitute_complicated_goal)
assert False, "Disjunctive goals not (yet) implemented."
def delete_owner(self, task):
# this assertion should never trigger, because disjunctive
# goals are now implemented with axioms
# (see substitute_complicated_goal)
assert False, "Disjunctive goals not (yet) implemented."
def build_rules(self, rules):
rule_head = pddl.Atom("@goal-reachable", [])
rule_body = condition_to_rule_body([], self.condition)
rules.append((rule_body, rule_head))
def get_type_map(self):
# HACK!
# Method uniquify_variables HAS already been called (which is good).
# We call it here again for its SIDE EFFECT of collecting the type_map
# (which is bad). Having "top-level conditions" (currently, only goal
# conditions, but might also include safety conditions and similar)
# contained in a separate wrapper class that stores a type map might
# be a better design.
type_map = {}
self.condition.uniquify_variables(type_map)
return type_map
def get_action_predicate(action):
name = action
variables = [par.name for par in action.parameters]
if isinstance(action.precondition, pddl.ExistentialCondition):
variables += [par.name for par in action.precondition.parameters]
return pddl.Atom(name, variables)
def get_axiom_predicate(axiom):
name = axiom
variables = [par.name for par in axiom.parameters]
if isinstance(axiom.condition, pddl.ExistentialCondition):
variables += [par.name for par in axiom.condition.parameters]
return pddl.Atom(name, variables)
def all_conditions(task):
for action in task.actions:
yield PreconditionProxy(action)
for effect in action.effects:
yield EffectConditionProxy(action, effect)
for axiom in task.axioms:
yield AxiomConditionProxy(axiom)
yield GoalConditionProxy(task)
# [1] Remove universal quantifications from conditions.
#
# Replace, in a top-down fashion, <forall(vars, phi)> by <not(not-all-phi)>,
# where <not-all-phi> is a new axiom.
#
# <not-all-phi> is defined as <not(forall(vars,phi))>, which is of course
# translated to NNF. The parameters of the new axioms are exactly the free
# variables of <forall(vars, phi)>.
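#
# Example (matching the code below): with free variable ?p, the condition
# <forall(?v, at(?p, ?v))> becomes <not(new-axiom@k(?p))>, where the new
# axiom new-axiom@k(?p) has condition <exists(?v, not(at(?p, ?v)))>,
# i.e. the NNF of the negated quantification.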
def remove_universal_quantifiers(task):
def recurse(condition):
# Uses new_axioms_by_condition and type_map from surrounding scope.
if isinstance(condition, pddl.UniversalCondition):
axiom_condition = condition.negate()
parameters = sorted(axiom_condition.free_variables())
typed_parameters = tuple(pddl.TypedObject(v, type_map[v]) for v in parameters)
axiom = new_axioms_by_condition.get((axiom_condition, typed_parameters))
if not axiom:
condition = recurse(axiom_condition)
axiom = task.add_axiom(list(typed_parameters), condition)
new_axioms_by_condition[(condition, typed_parameters)] = axiom
return pddl.NegatedAtom(axiom.name, parameters)
else:
new_parts = [recurse(part) for part in condition.parts]
return condition.change_parts(new_parts)
new_axioms_by_condition = {}
for proxy in tuple(all_conditions(task)):
# Cannot use generator because we add new axioms on the fly.
if proxy.condition.has_universal_part():
type_map = proxy.get_type_map()
proxy.set(recurse(proxy.condition))
# [2] Pull disjunctions to the root of the condition.
#
# After removing universal quantifiers, the (k-ary generalization of the)
# following rules suffice for doing that:
# (1) or(phi, or(psi, psi')) == or(phi, psi, psi')
# (2) exists(vars, or(phi, psi)) == or(exists(vars, phi), exists(vars, psi))
# (3) and(phi, or(psi, psi')) == or(and(phi, psi), and(phi, psi'))
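#
# Worked example of rule (3): and(A, or(B, C), or(D, E)) is equivalent to
# or(and(A, B, D), and(A, C, D), and(A, B, E), and(A, C, E)), which is the
# disjunction the code below builds by repeated distribution.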
def build_DNF(task):
def recurse(condition):
disjunctive_parts = []
other_parts = []
for part in condition.parts:
part = recurse(part)
if isinstance(part, pddl.Disjunction):
disjunctive_parts.append(part)
else:
other_parts.append(part)
if not disjunctive_parts:
return condition
# Rule (1): Associativity of disjunction.
if isinstance(condition, pddl.Disjunction):
result_parts = other_parts
for part in disjunctive_parts:
result_parts.extend(part.parts)
return pddl.Disjunction(result_parts)
# Rule (2): Distributivity disjunction/existential quantification.
if isinstance(condition, pddl.ExistentialCondition):
parameters = condition.parameters
result_parts = [pddl.ExistentialCondition(parameters, (part,))
for part in disjunctive_parts[0].parts]
return pddl.Disjunction(result_parts)
# Rule (3): Distributivity disjunction/conjunction.
assert isinstance(condition, pddl.Conjunction)
result_parts = [pddl.Conjunction(other_parts)]
while disjunctive_parts:
previous_result_parts = result_parts
result_parts = []
parts_to_distribute = disjunctive_parts.pop().parts
for part1 in previous_result_parts:
for part2 in parts_to_distribute:
result_parts.append(pddl.Conjunction((part1, part2)))
return pddl.Disjunction(result_parts)
for proxy in all_conditions(task):
if proxy.condition.has_disjunction():
proxy.set(recurse(proxy.condition).simplified())
# [3] Split conditions at the outermost disjunction.
def split_disjunctions(task):
for proxy in tuple(all_conditions(task)):
# Cannot use generator directly because we add/delete entries.
if isinstance(proxy.condition, pddl.Disjunction):
for part in proxy.condition.parts:
new_proxy = proxy.clone_owner()
new_proxy.set(part)
new_proxy.register_owner(task)
proxy.delete_owner(task)
# [4] Pull existential quantifiers out of conjunctions and group them.
#
# After removing universal quantifiers and creating the disjunctive form,
# only the following (representatives of) rules are needed:
# (1) exists(vars, exists(vars', phi)) == exists(vars + vars', phi)
# (2) and(phi, exists(vars, psi)) == exists(vars, and(phi, psi)),
# if var does not occur in phi as a free variable.
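#
# Worked example of rule (2): and(p(?y), exists(?x, q(?x))) is equivalent
# to exists(?x, and(p(?y), q(?x))) because ?x is not free in p(?y).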
def move_existential_quantifiers(task):
def recurse(condition):
existential_parts = []
other_parts = []
for part in condition.parts:
part = recurse(part)
if isinstance(part, pddl.ExistentialCondition):
existential_parts.append(part)
else:
other_parts.append(part)
if not existential_parts:
return condition
# Rule (1): Combine nested quantifiers.
if isinstance(condition, pddl.ExistentialCondition):
new_parameters = condition.parameters + existential_parts[0].parameters
new_parts = existential_parts[0].parts
return pddl.ExistentialCondition(new_parameters, new_parts)
# Rule (2): Pull quantifiers out of conjunctions.
assert isinstance(condition, pddl.Conjunction)
new_parameters = []
new_conjunction_parts = other_parts
for part in existential_parts:
new_parameters += part.parameters
new_conjunction_parts += part.parts
new_conjunction = pddl.Conjunction(new_conjunction_parts)
return pddl.ExistentialCondition(new_parameters, (new_conjunction,))
for proxy in all_conditions(task):
if proxy.condition.has_existential_part():
proxy.set(recurse(proxy.condition).simplified())
# [5a] Drop existential quantifiers from axioms, turning them
# into parameters.
def eliminate_existential_quantifiers_from_axioms(task):
# Note: This is very redundant with the corresponding method for
# actions and could easily be merged if axioms and actions were
# unified.
for axiom in task.axioms:
precond = axiom.condition
if isinstance(precond, pddl.ExistentialCondition):
# Copy parameter list, since it can be shared with
# parameter lists of other versions of this axiom (e.g.
# created when splitting up disjunctive preconditions).
axiom.parameters = list(axiom.parameters)
axiom.parameters.extend(precond.parameters)
axiom.condition = precond.parts[0]
# [5b] Drop existential quantifiers from action preconditions,
# turning them into action parameters (that don't form part of the
# name of the action).
def eliminate_existential_quantifiers_from_preconditions(task):
for action in task.actions:
precond = action.precondition
if isinstance(precond, pddl.ExistentialCondition):
# Copy parameter list, since it can be shared with
# parameter lists of other versions of this action (e.g.
# created when splitting up disjunctive preconditions).
action.parameters = list(action.parameters)
action.parameters.extend(precond.parameters)
action.precondition = precond.parts[0]
# [5c] Eliminate existential quantifiers from effect conditions
#
# For effect conditions, we replace "when exists(x, phi) then e" with
# "forall(x): when phi then e.
def eliminate_existential_quantifiers_from_conditional_effects(task):
for action in task.actions:
for effect in action.effects:
condition = effect.condition
if isinstance(condition, pddl.ExistentialCondition):
effect.parameters = list(effect.parameters)
effect.parameters.extend(condition.parameters)
effect.condition = condition.parts[0]
def substitute_complicated_goal(task):
goal = task.goal
if isinstance(goal, pddl.Literal):
return
elif isinstance(goal, pddl.Conjunction):
for item in goal.parts:
if not isinstance(item, pddl.Literal):
break
else:
return
new_axiom = task.add_axiom([], goal)
task.goal = pddl.Atom(new_axiom.name, new_axiom.parameters)
# Combine Steps [1], [2], [3], [4], [5] and do some additional verification
# that the task makes sense.
def normalize(task):
remove_universal_quantifiers(task)
substitute_complicated_goal(task)
build_DNF(task)
split_disjunctions(task)
move_existential_quantifiers(task)
eliminate_existential_quantifiers_from_axioms(task)
eliminate_existential_quantifiers_from_preconditions(task)
eliminate_existential_quantifiers_from_conditional_effects(task)
verify_axiom_predicates(task)
def verify_axiom_predicates(task):
# Verify that derived predicates are not used in :init or
# action effects.
axiom_names = set()
for axiom in task.axioms:
axiom_names.add(axiom.name)
for fact in task.init:
# Note that task.init can contain the assignment to (total-cost)
# in addition to regular atoms.
if getattr(fact, "predicate", None) in axiom_names:
raise SystemExit(
"error: derived predicate %r appears in :init fact '%s'" %
(fact.predicate, fact))
for action in task.actions:
for effect in action.effects:
if effect.literal.predicate in axiom_names:
raise SystemExit(
"error: derived predicate %r appears in effect of action %r" %
(effect.literal.predicate, action.name))
# [6] Build rules for exploration component.
def build_exploration_rules(task):
result = []
for proxy in all_conditions(task):
proxy.build_rules(result)
return result
def condition_to_rule_body(parameters, condition):
result = []
for par in parameters:
result.append(par.get_atom())
if not isinstance(condition, pddl.Truth):
if isinstance(condition, pddl.ExistentialCondition):
for par in condition.parameters:
result.append(par.get_atom())
condition = condition.parts[0]
if isinstance(condition, pddl.Conjunction):
parts = condition.parts
else:
parts = (condition,)
for part in parts:
if isinstance(part, pddl.Falsity):
# Use an atom in the body that is always false because
# it is not initially true and doesn't occur in the
# head of any rule.
return [pddl.Atom("@always-false", [])]
assert isinstance(part, pddl.Literal), "Condition not normalized: %r" % part
if not part.negated:
result.append(part)
return result
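def _demo_condition_to_rule_body():
    # Hedged sketch, not part of the original module: negated literals are
    # dropped from the rule body, which is sound for the relaxed
    # reachability exploration these rules feed.
    condition = pddl.Conjunction(
        [pddl.Atom("on", ["?x", "?y"]), pddl.NegatedAtom("clear", ["?y"])])
    assert condition_to_rule_body([], condition) == \
        [pddl.Atom("on", ["?x", "?y"])]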
if __name__ == "__main__":
import pddl_parser
task = pddl_parser.open()
normalize(task)
task.dump()
# === DAAISy-main/src/utils/translate/sas_tasks.py ===
SAS_FILE_VERSION = 3
DEBUG = False
class SASTask:
"""Planning task in finite-domain representation.
The user is responsible for making sure that the data fits a
number of structural restrictions. For example, conditions should
generally be sorted and mention each variable at most once. See
the validate methods for details."""
def __init__(self, variables, mutexes, init, goal,
operators, axioms, metric):
self.variables = variables
self.mutexes = mutexes
self.init = init
self.goal = goal
self.operators = sorted(operators, key=lambda op: (
op.name, op.prevail, op.pre_post))
self.axioms = sorted(axioms, key=lambda axiom: (
axiom.condition, axiom.effect))
self.metric = metric
if DEBUG:
self.validate()
def validate(self):
"""Fail an assertion if the task is invalid.
A task is valid if all its components are valid. Valid tasks
are almost in a kind of "canonical form", but not quite. For
example, operators and axioms are permitted to be listed in
any order, even though it would be possible to require some
kind of canonical sorting.
Note that we require that all derived variables are binary.
This is stricter than what later parts of the planner are
supposed to handle, but some parts of the translator rely on
this. We might want to consider making this a general
requirement throughout the planner.
Note also that there is *no* general rule on what the init (=
fallback) value of a derived variable is. For example, in
PSR-Large #1, it can be either 0 or 1. While it is "usually"
1, code should not rely on this.
"""
self.variables.validate()
for mutex in self.mutexes:
mutex.validate(self.variables)
self.init.validate(self.variables)
self.goal.validate(self.variables)
for op in self.operators:
op.validate(self.variables)
for axiom in self.axioms:
axiom.validate(self.variables, self.init)
assert self.metric is False or self.metric is True, self.metric
def dump(self):
print("variables:")
self.variables.dump()
print("%d mutex groups:" % len(self.mutexes))
for mutex in self.mutexes:
print("group:")
mutex.dump()
print("init:")
self.init.dump()
print("goal:")
self.goal.dump()
print("%d operators:" % len(self.operators))
for operator in self.operators:
operator.dump()
print("%d axioms:" % len(self.axioms))
for axiom in self.axioms:
axiom.dump()
print("metric: %s" % self.metric)
def output(self, stream):
print("begin_version", file=stream)
print(SAS_FILE_VERSION, file=stream)
print("end_version", file=stream)
print("begin_metric", file=stream)
print(int(self.metric), file=stream)
print("end_metric", file=stream)
self.variables.output(stream)
print(len(self.mutexes), file=stream)
for mutex in self.mutexes:
mutex.output(stream)
self.init.output(stream)
self.goal.output(stream)
print(len(self.operators), file=stream)
for op in self.operators:
op.output(stream)
print(len(self.axioms), file=stream)
for axiom in self.axioms:
axiom.output(stream)
def get_encoding_size(self):
task_size = 0
task_size += self.variables.get_encoding_size()
for mutex in self.mutexes:
task_size += mutex.get_encoding_size()
task_size += self.goal.get_encoding_size()
for op in self.operators:
task_size += op.get_encoding_size()
for axiom in self.axioms:
task_size += axiom.get_encoding_size()
return task_size
class SASVariables:
def __init__(self, ranges, axiom_layers, value_names):
self.ranges = ranges
self.axiom_layers = axiom_layers
self.value_names = value_names
def validate(self):
"""Validate variables.
All variables must have range at least 2, and derived
variables must have range exactly 2. See comment on derived
variables in the docstring of SASTask.validate.
"""
assert len(self.ranges) == len(self.axiom_layers) == len(
self.value_names)
for (var_range, layer, var_value_names) in zip(
self.ranges, self.axiom_layers, self.value_names):
assert var_range == len(var_value_names)
assert var_range >= 2
assert layer == -1 or layer >= 0
if layer != -1:
assert var_range == 2
def validate_fact(self, fact):
"""Assert that fact is a valid (var, value) pair."""
var, value = fact
assert 0 <= var < len(self.ranges)
assert 0 <= value < self.ranges[var]
def validate_condition(self, condition):
"""Assert that the condition (list of facts) is sorted, mentions each
variable at most once, and only consists of valid facts."""
last_var = -1
for (var, value) in condition:
self.validate_fact((var, value))
assert var > last_var
last_var = var
def dump(self):
for var, (rang, axiom_layer) in enumerate(
zip(self.ranges, self.axiom_layers)):
if axiom_layer != -1:
axiom_str = " [axiom layer %d]" % axiom_layer
else:
axiom_str = ""
print("v%d in {%s}%s" % (var, list(range(rang)), axiom_str))
def output(self, stream):
print(len(self.ranges), file=stream)
for var, (rang, axiom_layer, values) in enumerate(zip(
self.ranges, self.axiom_layers, self.value_names)):
print("begin_variable", file=stream)
print("var%d" % var, file=stream)
print(axiom_layer, file=stream)
print(rang, file=stream)
assert rang == len(values), (rang, values)
for value in values:
print(value, file=stream)
print("end_variable", file=stream)
def get_encoding_size(self):
# A variable with range k has encoding size k + 1 to also give the
# variable itself some weight.
return len(self.ranges) + sum(self.ranges)
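def _demo_variables_encoding_size():
    # Hedged sketch, not part of the original module: two variables with
    # ranges 2 and 3 yield an encoding size of (2 + 3) + 2 = 7.
    variables = SASVariables(ranges=[2, 3], axiom_layers=[-1, -1],
                             value_names=[["f", "t"], ["a", "b", "c"]])
    assert variables.get_encoding_size() == 7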
class SASMutexGroup:
def __init__(self, facts):
self.facts = sorted(facts)
def validate(self, variables):
"""Assert that the facts in the mutex group are sorted and unique
and that they are all valid."""
for fact in self.facts:
variables.validate_fact(fact)
assert self.facts == sorted(set(self.facts))
def dump(self):
for var, val in self.facts:
print("v%d: %d" % (var, val))
def output(self, stream):
print("begin_mutex_group", file=stream)
print(len(self.facts), file=stream)
for var, val in self.facts:
print(var, val, file=stream)
print("end_mutex_group", file=stream)
def get_encoding_size(self):
return len(self.facts)
class SASInit:
def __init__(self, values):
self.values = values
def validate(self, variables):
"""Validate initial state.
Assert that the initial state contains the correct number of
values and that all values are in range.
"""
assert len(self.values) == len(variables.ranges)
for fact in enumerate(self.values):
variables.validate_fact(fact)
def dump(self):
for var, val in enumerate(self.values):
print("v%d: %d" % (var, val))
def output(self, stream):
print("begin_state", file=stream)
for val in self.values:
print(val, file=stream)
print("end_state", file=stream)
class SASGoal:
def __init__(self, pairs):
self.pairs = sorted(pairs)
def validate(self, variables):
"""Assert that the goal is nonempty and a valid condition."""
assert self.pairs
variables.validate_condition(self.pairs)
def dump(self):
for var, val in self.pairs:
print("v%d: %d" % (var, val))
def output(self, stream):
print("begin_goal", file=stream)
print(len(self.pairs), file=stream)
for var, val in self.pairs:
print(var, val, file=stream)
print("end_goal", file=stream)
def get_encoding_size(self):
return len(self.pairs)
class SASOperator:
def __init__(self, name, prevail, pre_post, cost):
self.name = name
self.prevail = sorted(prevail)
self.pre_post = self._canonical_pre_post(pre_post)
self.cost = cost
def _canonical_pre_post(self, pre_post):
# Return a sorted and uniquified version of pre_post. We would
# like to just use sorted(set(pre_post)), but this fails because
# the effect conditions are a list and hence not hashable.
def tuplify(entry):
var, pre, post, cond = entry
return var, pre, post, tuple(cond)
def listify(entry):
var, pre, post, cond = entry
return var, pre, post, list(cond)
pre_post = map(tuplify, pre_post)
pre_post = sorted(set(pre_post))
pre_post = list(map(listify, pre_post))
return pre_post
def validate(self, variables):
"""Validate the operator.
Assert that
1. Prevail conditions are valid conditions (i.e., sorted and
all referring to different variables)
2. The pre_post list is sorted by (var, pre, post, cond), and the
same (var, pre, post, cond) 4-tuple is not repeated.
3. Effect conditions are valid conditions and do not contain variables
from the pre- or prevail conditions.
4. Variables occurring in pre_post rules do not have a prevail
condition.
5. Preconditions in pre_post are -1 or valid facts.
6. Effects are valid facts.
7. Effect variables are non-derived.
8. If a variable has multiple pre_post rules, then pre is
identical in all these rules.
9. There is at least one effect.
10. Costs are non-negative integers.
Odd things that are *not* illegal:
- The effect in a pre_post rule may be identical to the
precondition or to an effect condition of that effect.
TODO/open question:
- It is currently not very clear what the semantics of operators
should be when effects "conflict", i.e., when multiple effects
trigger and want to set a given variable to two different
values. In the case where both are unconditional effects, we
should make sure that our representation doesn't actually
contain two such effects, but when at least one of them is
conditional, things are not so easy.
To make our life simpler when generating SAS+ tasks from
PDDL tasks, it probably makes most sense to generalize the
PDDL rule in this case: there is a value order where certain
values "win" over others in this situation. It probably
makes sense to say the "highest" values should win in this
case, because that's consistent with the PDDL rules if we
say false = 0 and true = 1, and also with our sort order of
effects it means we get the right result if we just apply
effects in sequence.
But whatever we end up deciding, we need to be clear about it,
document it and make sure that all of our code knows the rules
and follows them.
"""
variables.validate_condition(self.prevail)
assert self.pre_post == self._canonical_pre_post(self.pre_post)
prevail_vars = {var for (var, value) in self.prevail}
pre_values = {}
for var, pre, post, cond in self.pre_post:
variables.validate_condition(cond)
assert var not in prevail_vars
if pre != -1:
variables.validate_fact((var, pre))
variables.validate_fact((var, post))
assert variables.axiom_layers[var] == -1
if var in pre_values:
assert pre_values[var] == pre
else:
pre_values[var] = pre
for var, pre, post, cond in self.pre_post:
for cvar, cval in cond:
assert (cvar not in pre_values or pre_values[cvar] == -1)
assert (cvar not in prevail_vars)
assert self.pre_post
assert self.cost >= 0 and self.cost == int(self.cost)
def dump(self):
print(self.name)
print("Prevail:")
for var, val in self.prevail:
print(" v%d: %d" % (var, val))
print("Pre/Post:")
for var, pre, post, cond in self.pre_post:
if cond:
cond_str = " [%s]" % ", ".join(
["%d: %d" % tuple(c) for c in cond])
else:
cond_str = ""
print(" v%d: %d -> %d%s" % (var, pre, post, cond_str))
def output(self, stream):
print("begin_operator", file=stream)
print(self.name[1:-1], file=stream)
print(len(self.prevail), file=stream)
for var, val in self.prevail:
print(var, val, file=stream)
print(len(self.pre_post), file=stream)
for var, pre, post, cond in self.pre_post:
print(len(cond), end=' ', file=stream)
for cvar, cval in cond:
print(cvar, cval, end=' ', file=stream)
print(var, pre, post, file=stream)
print(self.cost, file=stream)
print("end_operator", file=stream)
def get_encoding_size(self):
size = 1 + len(self.prevail)
for var, pre, post, cond in self.pre_post:
size += 1 + len(cond)
if pre != -1:
size += 1
return size
def get_applicability_conditions(self):
"""Return the combined applicability conditions
(prevail conditions and preconditions) of the operator.
Returns a sorted list of (var, value) pairs. This is
guaranteed to contain at most one fact per variable and
must hence be non-contradictory."""
conditions = {}
for var, val in self.prevail:
assert var not in conditions
conditions[var] = val
for var, pre, post, cond in self.pre_post:
if pre != -1:
assert var not in conditions or conditions[var] == pre
conditions[var] = pre
return sorted(conditions.items())
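def _demo_operator_applicability():
    # Hedged sketch, not part of the original module: prevail conditions and
    # pre_post preconditions merge into one sorted applicability condition.
    op = SASOperator("(drive t1 a b)", prevail=[(2, 0)],
                     pre_post=[(0, 1, 0, [])], cost=1)
    assert op.get_applicability_conditions() == [(0, 1), (2, 0)]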
class SASAxiom:
def __init__(self, condition, effect):
self.condition = sorted(condition)
self.effect = effect
assert self.effect[1] in (0, 1)
for _, val in condition:
assert val >= 0, condition
def validate(self, variables, init):
"""Validate the axiom.
Assert that the axiom condition is a valid condition, that the
effect is a valid fact, that the effect variable is a derived
variable, and that the layering condition is satisfied.
See the docstring of SASTask.validate for information on the
restriction on derived variables. The layering condition boils
down to:
1. Axioms always set the "non-init" value of the derived
variable.
        2. Derived variables in the condition must have a lower or
equal layer to derived variables appearing in the effect.
3. Conditions with equal layer are only allowed when the
condition uses the "non-init" value of that variable.
TODO/bug: rule #1 is currently disabled because we currently
have axioms that violate it. This is likely due to the
"extended domain transition graphs" described in the Fast
Downward paper, Section 5.1. However, we want to eventually
        change this. See issue454. For cases where rule #1 is violated,
"non-init" should be "init" in rule #3.
"""
variables.validate_condition(self.condition)
variables.validate_fact(self.effect)
eff_var, eff_value = self.effect
eff_layer = variables.axiom_layers[eff_var]
assert eff_layer >= 0
eff_init_value = init.values[eff_var]
## The following rule is currently commented out because of
## the TODO/bug mentioned in the docstring.
# assert eff_value != eff_init_value
for cond_var, cond_value in self.condition:
cond_layer = variables.axiom_layers[cond_var]
if cond_layer != -1:
assert cond_layer <= eff_layer
if cond_layer == eff_layer:
cond_init_value = init.values[cond_var]
## Once the TODO/bug above is addressed, the
## following four lines can be simplified because
## we are guaranteed to land in the "if" branch.
if eff_value != eff_init_value:
assert cond_value != cond_init_value
else:
assert cond_value == cond_init_value
def dump(self):
print("Condition:")
for var, val in self.condition:
print(" v%d: %d" % (var, val))
print("Effect:")
var, val = self.effect
print(" v%d: %d" % (var, val))
def output(self, stream):
print("begin_rule", file=stream)
print(len(self.condition), file=stream)
for var, val in self.condition:
print(var, val, file=stream)
var, val = self.effect
print(var, 1 - val, val, file=stream)
print("end_rule", file=stream)
def get_encoding_size(self):
return 1 + len(self.condition)
# === DAAISy-main/src/utils/translate/sccs.py ===
"""Tarjan's algorithm for maximal strongly connected components.
We provide two versions of the algorithm for different graph
representations.
Since the original recursive version exceeds python's maximal
recursion depth on some planning instances, this is an iterative
version with an explicit recursion stack (iter_stack).
Note that the derived graph where each SCC is a single "supernode" is
necessarily acyclic. The SCCs returned by the algorithm are in a
topological sort order with respect to this derived DAG.
"""
from collections import defaultdict
__all__ = ["get_sccs_adjacency_list", "get_sccs_adjacency_dict"]
def get_sccs_adjacency_list(adjacency_list):
"""Compute SCCs for a graph represented as an adjacency list.
`adjacency_list` is a list (or similar data structure) whose
indices correspond to the graph nodes. For example, if
`len(adjacency_list)` is N, the graph nodes are {0, ..., N-1}.
For every node `u`, `adjacency_list[u]` is the list (or similar data
structure) of successors of `u`.
Returns a list of lists that defines a partition of {0, ..., N-1},
where each block in the partition is an SCC of the graph, and
    the partition is given in topological sort order."""
return StronglyConnectedComponentComputation(adjacency_list).get_result()
def get_sccs_adjacency_dict(adjacency_dict):
"""Compute SCCs for a graph represented as an adjacency dict.
`adjacency_dict` is a dictionary whose keys are the vertices of
the graph.
For every node `u`, adjacency_dict[u]` is the list (or similar
data structure) of successors of `u`.
Returns a list of lists that defines a partition of the graph
nodes, where each block in the partition is an SCC of the graph,
    and the partition is given in topological sort order."""
node_to_index = {}
index_to_node = []
for index, node in enumerate(adjacency_dict):
node_to_index[node] = index
index_to_node.append(node)
adjacency_list = []
for index, node in enumerate(index_to_node):
successors = adjacency_dict[node]
successor_indices = [node_to_index[v] for v in successors]
adjacency_list.append(successor_indices)
result_indices = get_sccs_adjacency_list(adjacency_list)
result = []
for block_indices in result_indices:
block = [index_to_node[index] for index in block_indices]
result.append(block)
return result
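def _demo_sccs():
    # Hedged sketch, not part of the original module: the graph
    # 0 -> 1 -> 2 -> 0 plus 2 -> 3 has SCCs {0, 1, 2} and {3}, returned in
    # topological order.
    assert get_sccs_adjacency_list([[1], [2], [0, 3], []]) == [[0, 1, 2], [3]]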
class StronglyConnectedComponentComputation:
def __init__(self, unweighted_graph):
self.graph = unweighted_graph
self.BEGIN, self.CONTINUE, self.RETURN = 0, 1, 2 # "recursion" handling
def get_result(self):
self.indices = dict()
self.lowlinks = defaultdict(lambda: -1)
self.stack_indices = dict()
self.current_index = 0
self.stack = []
self.sccs = []
for i in range(len(self.graph)):
if i not in self.indices:
self.visit(i)
self.sccs.reverse()
return self.sccs
def visit(self, vertex):
iter_stack = [(vertex, None, None, self.BEGIN)]
while iter_stack:
v, w, succ_index, state = iter_stack.pop()
if state == self.BEGIN:
self.current_index += 1
self.indices[v] = self.current_index
self.lowlinks[v] = self.current_index
self.stack_indices[v] = len(self.stack)
self.stack.append(v)
iter_stack.append((v, None, 0, self.CONTINUE))
elif state == self.CONTINUE:
successors = self.graph[v]
if succ_index == len(successors):
if self.lowlinks[v] == self.indices[v]:
stack_index = self.stack_indices[v]
scc = self.stack[stack_index:]
del self.stack[stack_index:]
for n in scc:
del self.stack_indices[n]
self.sccs.append(scc)
else:
w = successors[succ_index]
if w not in self.indices:
iter_stack.append((v, w, succ_index, self.RETURN))
iter_stack.append((w, None, None, self.BEGIN))
else:
if w in self.stack_indices:
self.lowlinks[v] = min(self.lowlinks[v],
self.indices[w])
iter_stack.append(
(v, None, succ_index + 1, self.CONTINUE))
elif state == self.RETURN:
self.lowlinks[v] = min(self.lowlinks[v], self.lowlinks[w])
iter_stack.append((v, None, succ_index + 1, self.CONTINUE))
# === DAAISy-main/src/utils/translate/pddl_fd/f_expression.py ===
class FunctionalExpression:
def __init__(self, parts):
self.parts = tuple(parts)
def dump(self, indent=" "):
print("%s%s" % (indent, self._dump()))
for part in self.parts:
part.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def instantiate(self, var_mapping, init_facts):
raise ValueError("Cannot instantiate condition: not normalized")
class NumericConstant(FunctionalExpression):
parts = ()
def __init__(self, value):
if value != int(value):
raise ValueError("Fractional numbers are not supported")
self.value = int(value)
def __eq__(self, other):
return (self.__class__ == other.__class__ and self.value == other.value)
def __str__(self):
return "%s %s" % (self.__class__.__name__, self.value)
def _dump(self):
return str(self)
def instantiate(self, var_mapping, init_facts):
return self
class PrimitiveNumericExpression(FunctionalExpression):
parts = ()
def __init__(self, symbol, args):
self.symbol = symbol
self.args = tuple(args)
self.hash = hash((self.__class__, self.symbol, self.args))
def __hash__(self):
return self.hash
def __eq__(self, other):
return (self.__class__ == other.__class__ and self.symbol == other.symbol
and self.args == other.args)
def __str__(self):
return "%s %s(%s)" % ("PNE", self.symbol, ", ".join(map(str, self.args)))
def dump(self, indent=" "):
print("%s%s" % (indent, self._dump()))
def _dump(self):
return str(self)
def instantiate(self, var_mapping, init_assignments):
args = [var_mapping.get(arg, arg) for arg in self.args]
pne = PrimitiveNumericExpression(self.symbol, args)
assert self.symbol != "total-cost"
# We know this expression is constant. Substitute it by corresponding
# initialization from task.
result = init_assignments.get(pne)
assert result is not None, "Could not find instantiation for PNE: %r" % (str(pne),)
return result
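def _demo_pne_instantiation():
    # Hedged sketch, not part of the original module: a ground copy of the
    # expression is looked up in the assignments collected from :init.
    pne = PrimitiveNumericExpression("drive-cost", ["?from", "?to"])
    ground = PrimitiveNumericExpression("drive-cost", ["a", "b"])
    init_assignments = {ground: NumericConstant(4)}
    assert pne.instantiate({"?from": "a", "?to": "b"},
                           init_assignments) == NumericConstant(4)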
class FunctionAssignment:
def __init__(self, fluent, expression):
self.fluent = fluent
self.expression = expression
def __str__(self):
return "%s %s %s" % (self.__class__.__name__, self.fluent, self.expression)
def dump(self, indent=" "):
print("%s%s" % (indent, self._dump()))
self.fluent.dump(indent + " ")
self.expression.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def instantiate(self, var_mapping, init_facts):
if not (isinstance(self.expression, PrimitiveNumericExpression) or
isinstance(self.expression, NumericConstant)):
raise ValueError("Cannot instantiate assignment: not normalized")
# We know that this assignment is a cost effect of an action (for initial state
# assignments, "instantiate" is not called). Hence, we know that the fluent is
# the 0-ary "total-cost" which does not need to be instantiated
assert self.fluent.symbol == "total-cost"
fluent = self.fluent
expression = self.expression.instantiate(var_mapping, init_facts)
return self.__class__(fluent, expression)
class Assign(FunctionAssignment):
def __str__(self):
return "%s := %s" % (self.fluent, self.expression)
class Increase(FunctionAssignment):
pass
# === DAAISy-main/src/utils/translate/pddl_fd/pddl_types.py ===
# Renamed from types.py to avoid clash with stdlib module.
# In the future, use explicitly relative imports or absolute
# imports as a better solution.
import itertools
def _get_type_predicate_name(type_name):
# PDDL allows mixing types and predicates, but some PDDL files
# have name collisions between types and predicates. We want to
# support both the case where such name collisions occur and the
# case where types are used as predicates.
#
# We internally give types predicate names that cannot be confused
# with non-type predicates. When the input uses a PDDL type as a
# predicate, we automatically map it to this internal name.
return "type@%s" % type_name
class Type:
def __init__(self, name, basetype_name=None):
self.name = name
self.basetype_name = basetype_name
def __str__(self):
return self.name
def __repr__(self):
return "Type(%s, %s)" % (self.name, self.basetype_name)
def get_predicate_name(self):
return _get_type_predicate_name(self.name)
class TypedObject:
def __init__(self, name, type_name):
self.name = name
self.type_name = type_name
def __hash__(self):
return hash((self.name, self.type_name))
def __eq__(self, other):
return self.name == other.name and self.type_name == other.type_name
def __ne__(self, other):
return not self == other
def __str__(self):
return "%s: %s" % (self.name, self.type_name)
def __repr__(self):
return "<TypedObject %s: %s>" % (self.name, self.type_name)
def uniquify_name(self, type_map, renamings):
if self.name not in type_map:
type_map[self.name] = self.type_name
return self
for counter in itertools.count(1):
new_name = self.name + str(counter)
if new_name not in type_map:
renamings[self.name] = new_name
type_map[new_name] = self.type_name
return TypedObject(new_name, self.type_name)
def get_atom(self):
# TODO: Resolve cyclic import differently.
from . import conditions
predicate_name = _get_type_predicate_name(self.type_name)
return conditions.Atom(predicate_name, [self.name])
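def _demo_uniquify_name():
    # Hedged sketch, not part of the original module: a clashing "?x" is
    # renamed to "?x1" and the renaming is recorded for later substitution.
    type_map, renamings = {"?x": "block"}, {}
    obj = TypedObject("?x", "gripper").uniquify_name(type_map, renamings)
    assert obj.name == "?x1" and renamings == {"?x": "?x1"}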
# === DAAISy-main/src/utils/translate/pddl_fd/effects.py ===
from . import conditions
def cartesian_product(*sequences):
# TODO: Also exists in tools.py outside the pddl package (defined slightly
# differently). Not good. Need proper import paths.
if not sequences:
yield ()
else:
for tup in cartesian_product(*sequences[1:]):
for item in sequences[0]:
yield (item,) + tup
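def _demo_cartesian_product():
    # Hedged sketch, not part of the original module: every combination is
    # produced, with the first sequence varying fastest.
    assert list(cartesian_product(["a", "b"], ["x", "y"])) == [
        ("a", "x"), ("b", "x"), ("a", "y"), ("b", "y")]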
class Effect:
def __init__(self, parameters, condition, literal):
self.parameters = parameters
self.condition = condition
self.literal = literal
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.parameters == other.parameters and
self.condition == other.condition and
self.literal == other.literal)
def dump(self):
indent = " "
if self.parameters:
print("%sforall %s" % (indent, ", ".join(map(str, self.parameters))))
indent += " "
if self.condition != conditions.Truth():
print("%sif" % indent)
self.condition.dump(indent + " ")
print("%sthen" % indent)
indent += " "
print("%s%s" % (indent, self.literal))
def copy(self):
return Effect(self.parameters, self.condition, self.literal)
def uniquify_variables(self, type_map):
renamings = {}
self.parameters = [par.uniquify_name(type_map, renamings)
for par in self.parameters]
self.condition = self.condition.uniquify_variables(type_map, renamings)
self.literal = self.literal.rename_variables(renamings)
def instantiate(self, var_mapping, init_facts, fluent_facts,
objects_by_type, result):
if self.parameters:
var_mapping = var_mapping.copy() # Will modify this.
object_lists = [objects_by_type.get(par.type_name, [])
for par in self.parameters]
for object_tuple in cartesian_product(*object_lists):
for (par, obj) in zip(self.parameters, object_tuple):
var_mapping[par.name] = obj
self._instantiate(var_mapping, init_facts, fluent_facts, result)
else:
self._instantiate(var_mapping, init_facts, fluent_facts, result)
def _instantiate(self, var_mapping, init_facts, fluent_facts, result):
condition = []
try:
self.condition.instantiate(var_mapping, init_facts, fluent_facts, condition)
except conditions.Impossible:
return
effects = []
self.literal.instantiate(var_mapping, init_facts, fluent_facts, effects)
assert len(effects) <= 1
if effects:
result.append((condition, effects[0]))
def relaxed(self):
if self.literal.negated:
return None
else:
return Effect(self.parameters, self.condition.relaxed(), self.literal)
def simplified(self):
return Effect(self.parameters, self.condition.simplified(), self.literal)
class ConditionalEffect:
def __init__(self, condition, effect):
if isinstance(effect, ConditionalEffect):
self.condition = conditions.Conjunction([condition, effect.condition])
self.effect = effect.effect
else:
self.condition = condition
self.effect = effect
def dump(self, indent=" "):
print("%sif" % (indent))
self.condition.dump(indent + " ")
print("%sthen" % (indent))
self.effect.dump(indent + " ")
def normalize(self):
norm_effect = self.effect.normalize()
if isinstance(norm_effect, ConjunctiveEffect):
new_effects = []
for effect in norm_effect.effects:
assert isinstance(effect, SimpleEffect) or isinstance(effect, ConditionalEffect)
new_effects.append(ConditionalEffect(self.condition, effect))
return ConjunctiveEffect(new_effects)
elif isinstance(norm_effect, UniversalEffect):
child = norm_effect.effect
cond_effect = ConditionalEffect(self.condition, child)
return UniversalEffect(norm_effect.parameters, cond_effect)
else:
return ConditionalEffect(self.condition, norm_effect)
def extract_cost(self):
return None, self
class UniversalEffect:
def __init__(self, parameters, effect):
if isinstance(effect, UniversalEffect):
self.parameters = parameters + effect.parameters
self.effect = effect.effect
else:
self.parameters = parameters
self.effect = effect
def dump(self, indent=" "):
print("%sforall %s" % (indent, ", ".join(map(str, self.parameters))))
self.effect.dump(indent + " ")
def normalize(self):
norm_effect = self.effect.normalize()
if isinstance(norm_effect, ConjunctiveEffect):
new_effects = []
for effect in norm_effect.effects:
assert isinstance(effect, SimpleEffect) or isinstance(effect, ConditionalEffect) \
or isinstance(effect, UniversalEffect)
new_effects.append(UniversalEffect(self.parameters, effect))
return ConjunctiveEffect(new_effects)
else:
return UniversalEffect(self.parameters, norm_effect)
def extract_cost(self):
return None, self
class ConjunctiveEffect:
def __init__(self, effects):
flattened_effects = []
for effect in effects:
if isinstance(effect, ConjunctiveEffect):
flattened_effects += effect.effects
else:
flattened_effects.append(effect)
self.effects = flattened_effects
def dump(self, indent=" "):
print("%sand" % (indent))
for eff in self.effects:
eff.dump(indent + " ")
def normalize(self):
new_effects = []
for effect in self.effects:
new_effects.append(effect.normalize())
return ConjunctiveEffect(new_effects)
def extract_cost(self):
new_effects = []
cost_effect = None
for effect in self.effects:
if isinstance(effect, CostEffect):
cost_effect = effect
else:
new_effects.append(effect)
return cost_effect, ConjunctiveEffect(new_effects)
class SimpleEffect:
def __init__(self, effect):
self.effect = effect
def dump(self, indent=" "):
print("%s%s" % (indent, self.effect))
def normalize(self):
return self
def extract_cost(self):
return None, self
class CostEffect:
def __init__(self, effect):
self.effect = effect
def dump(self, indent=" "):
print("%s%s" % (indent, self.effect))
def normalize(self):
return self
def extract_cost(self):
# This only happens if an action has no effect apart from the cost effect.
return self, None
# === DAAISy-main/src/utils/translate/pddl_fd/functions.py ===
class Function:
def __init__(self, name, arguments, type_name):
self.name = name
self.arguments = arguments
if type_name != "number":
raise SystemExit("Error: object fluents not supported\n" +
"(function %s has type %s)" % (name, type_name))
self.type_name = type_name
def __str__(self):
result = "%s(%s)" % (self.name, ", ".join(map(str, self.arguments)))
if self.type_name:
result += ": %s" % self.type_name
return result
# === DAAISy-main/src/utils/translate/pddl_fd/actions.py ===
import copy
from . import conditions
class Action:
def __init__(self, name, parameters, num_external_parameters,
precondition, effects, cost):
assert 0 <= num_external_parameters <= len(parameters)
self.name = name
self.parameters = parameters
# num_external_parameters denotes how many of the parameters
# are "external", i.e., should be part of the grounded action
# name. Usually all parameters are external, but "invisible"
# parameters can be created when compiling away existential
# quantifiers in conditions.
self.num_external_parameters = num_external_parameters
self.precondition = precondition
self.effects = effects
self.cost = cost
self.uniquify_variables() # TODO: uniquify variables in cost?
def __repr__(self):
return "<Action %r at %#x>" % (self.name, id(self))
def dump(self):
print("%s(%s)" % (self.name, ", ".join(map(str, self.parameters))))
print("Precondition:")
self.precondition.dump()
print("Effects:")
for eff in self.effects:
eff.dump()
print("Cost:")
if (self.cost):
self.cost.dump()
else:
print(" None")
def uniquify_variables(self):
self.type_map = {par.name: par.type_name for par in self.parameters}
self.precondition = self.precondition.uniquify_variables(self.type_map)
for effect in self.effects:
effect.uniquify_variables(self.type_map)
def relaxed(self):
new_effects = []
for eff in self.effects:
relaxed_eff = eff.relaxed()
if relaxed_eff:
new_effects.append(relaxed_eff)
        return Action(self.name, self.parameters, self.num_external_parameters,
                      self.precondition.relaxed().simplified(),
                      new_effects, self.cost)
def untyped(self):
# We do not actually remove the types from the parameter lists,
# just additionally incorporate them into the conditions.
# Maybe not very nice.
result = copy.copy(self)
parameter_atoms = [par.to_untyped_strips() for par in self.parameters]
new_precondition = self.precondition.untyped()
result.precondition = conditions.Conjunction(parameter_atoms + [new_precondition])
result.effects = [eff.untyped() for eff in self.effects]
return result
def instantiate(self, var_mapping, init_facts, init_assignments,
fluent_facts, objects_by_type, metric):
"""Return a PropositionalAction which corresponds to the instantiation of
this action with the arguments in var_mapping. Only fluent parts of the
conditions (those in fluent_facts) are included. init_facts are evaluated
while instantiating.
Precondition and effect conditions must be normalized for this to work.
Returns None if var_mapping does not correspond to a valid instantiation
(because it has impossible preconditions or an empty effect list.)"""
arg_list = [var_mapping[par.name]
for par in self.parameters[:self.num_external_parameters]]
name = "(%s %s)" % (self.name, " ".join(arg_list))
precondition = []
try:
self.precondition.instantiate(var_mapping, init_facts,
fluent_facts, precondition)
except conditions.Impossible:
return None
effects = []
for eff in self.effects:
eff.instantiate(var_mapping, init_facts, fluent_facts,
objects_by_type, effects)
if effects:
if metric:
if self.cost is None:
cost = 0
else:
cost = int(self.cost.instantiate(
var_mapping, init_assignments).expression.value)
else:
cost = 1
return PropositionalAction(name, precondition, effects, cost)
else:
return None
class PropositionalAction:
def __init__(self, name, precondition, effects, cost):
self.name = name
self.precondition = precondition
self.add_effects = []
self.del_effects = []
for condition, effect in effects:
if not effect.negated:
self.add_effects.append((condition, effect))
# Warning: This is O(N^2), could be turned into O(N).
# But that might actually harm performance, since there are
# usually few effects.
# TODO: Measure this in critical domains, then use sets if acceptable.
for condition, effect in effects:
if effect.negated and (condition, effect.negate()) not in self.add_effects:
self.del_effects.append((condition, effect.negate()))
self.cost = cost
def __repr__(self):
return "<PropositionalAction %r at %#x>" % (self.name, id(self))
def dump(self):
print(self.name)
for fact in self.precondition:
print("PRE: %s" % fact)
for cond, fact in self.add_effects:
print("ADD: %s -> %s" % (", ".join(map(str, cond)), fact))
for cond, fact in self.del_effects:
print("DEL: %s -> %s" % (", ".join(map(str, cond)), fact))
print("cost:", self.cost)
# === DAAISy-main/src/utils/translate/pddl_fd/predicates.py ===
class Predicate:
def __init__(self, name, arguments):
self.name = name
self.arguments = arguments
def __str__(self):
return "%s(%s)" % (self.name, ", ".join(map(str, self.arguments)))
def get_arity(self):
return len(self.arguments)
# === DAAISy-main/src/utils/translate/pddl_fd/__init__.py ===
from .actions import Action
from .actions import PropositionalAction
from .axioms import Axiom
from .axioms import PropositionalAxiom
from .conditions import Atom
from .conditions import Conjunction
from .conditions import Disjunction
from .conditions import ExistentialCondition
from .conditions import Falsity
from .conditions import Literal
from .conditions import NegatedAtom
from .conditions import Truth
from .conditions import UniversalCondition
from .effects import ConditionalEffect
from .effects import ConjunctiveEffect
from .effects import CostEffect
from .effects import Effect
from .effects import SimpleEffect
from .effects import UniversalEffect
from .f_expression import Assign
from .f_expression import Increase
from .f_expression import NumericConstant
from .f_expression import PrimitiveNumericExpression
from .functions import Function
from .pddl_types import Type
from .pddl_types import TypedObject
from .predicates import Predicate
from .tasks import Requirements
from .tasks import Task
# === DAAISy-main/src/utils/translate/pddl_fd/tasks.py ===
from . import axioms
from . import predicates
class Task:
def __init__(self, domain_name, task_name, requirements,
types, objects, predicates, functions, init, goal,
actions, axioms, use_metric):
self.domain_name = domain_name
self.task_name = task_name
self.requirements = requirements
self.types = types
self.objects = objects
self.predicates = predicates
self.functions = functions
self.init = init
self.goal = goal
self.actions = actions
self.axioms = axioms
self.axiom_counter = 0
self.use_min_cost_metric = use_metric
def add_axiom(self, parameters, condition):
name = "new-axiom@%d" % self.axiom_counter
self.axiom_counter += 1
axiom = axioms.Axiom(name, parameters, len(parameters), condition)
self.predicates.append(predicates.Predicate(name, parameters))
self.axioms.append(axiom)
return axiom
def dump(self):
print("Problem %s: %s [%s]" % (
self.domain_name, self.task_name, self.requirements))
print("Types:")
for type in self.types:
print(" %s" % type)
print("Objects:")
for obj in self.objects:
print(" %s" % obj)
print("Predicates:")
for pred in self.predicates:
print(" %s" % pred)
print("Functions:")
for func in self.functions:
print(" %s" % func)
print("Init:")
for fact in self.init:
print(" %s" % fact)
print("Goal:")
self.goal.dump()
print("Actions:")
for action in self.actions:
action.dump()
if self.axioms:
print("Axioms:")
for axiom in self.axioms:
axiom.dump()
class Requirements:
def __init__(self, requirements):
self.requirements = requirements
for req in requirements:
assert req in (
":strips", ":adl", ":typing", ":negation", ":equality",
":negative-preconditions", ":disjunctive-preconditions",
":existential-preconditions", ":universal-preconditions",
":quantified-preconditions", ":conditional-effects",
":derived-predicates", ":action-costs"), req
def __str__(self):
return ", ".join(self.requirements)
# === DAAISy-main/src/utils/translate/pddl_fd/axioms.py ===
from . import conditions
class Axiom:
def __init__(self, name, parameters, num_external_parameters, condition):
# For an explanation of num_external_parameters, see the
# related Action class. Note that num_external_parameters
# always equals the arity of the derived predicate.
assert 0 <= num_external_parameters <= len(parameters)
self.name = name
self.parameters = parameters
self.num_external_parameters = num_external_parameters
self.condition = condition
self.uniquify_variables()
def dump(self):
args = map(str, self.parameters[:self.num_external_parameters])
print("Axiom %s(%s)" % (self.name, ", ".join(args)))
self.condition.dump()
def uniquify_variables(self):
self.type_map = {par.name: par.type_name for par in self.parameters}
self.condition = self.condition.uniquify_variables(self.type_map)
def instantiate(self, var_mapping, init_facts, fluent_facts):
# The comments for Action.instantiate apply accordingly.
arg_list = [self.name] + [
var_mapping[par.name]
for par in self.parameters[:self.num_external_parameters]]
name = "(%s)" % " ".join(arg_list)
condition = []
try:
self.condition.instantiate(var_mapping, init_facts, fluent_facts, condition)
except conditions.Impossible:
return None
effect_args = [var_mapping.get(arg.name, arg.name)
for arg in self.parameters[:self.num_external_parameters]]
effect = conditions.Atom(self.name, effect_args)
return PropositionalAxiom(name, condition, effect)
class PropositionalAxiom:
def __init__(self, name, condition, effect):
self.name = name
self.condition = condition
self.effect = effect
def clone(self):
return PropositionalAxiom(self.name, list(self.condition), self.effect)
def dump(self):
if self.effect.negated:
print("not", end=' ')
print(self.name)
for fact in self.condition:
print("PRE: %s" % fact)
print("EFF: %s" % self.effect)
@property
def key(self):
return (self.name, self.condition, self.effect)
def __lt__(self, other):
return self.key < other.key
def __le__(self, other):
return self.key <= other.key
def __eq__(self, other):
return self.key == other.key
def __repr__(self):
return '<PropositionalAxiom %s %s -> %s>' % (
self.name, self.condition, self.effect)
# === DAAISy-main/src/utils/translate/pddl_fd/conditions.py ===
# Conditions (of any type) are immutable, because they need to
# be hashed occasionally. Immutability also allows more efficient comparison
# based on a precomputed hash value.
#
# Careful: Most other classes (e.g. Effects, Axioms, Actions) are not!
class Condition:
def __init__(self, parts):
self.parts = tuple(parts)
self.hash = hash((self.__class__, self.parts))
def __hash__(self):
return self.hash
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self.hash < other.hash
def __le__(self, other):
return self.hash <= other.hash
def dump(self, indent=" "):
print("%s%s" % (indent, self._dump()))
for part in self.parts:
part.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def _postorder_visit(self, method_name, *args):
part_results = [part._postorder_visit(method_name, *args)
for part in self.parts]
method = getattr(self, method_name, self._propagate)
return method(part_results, *args)
def _propagate(self, parts, *args):
return self.change_parts(parts)
def simplified(self):
return self._postorder_visit("_simplified")
def relaxed(self):
return self._postorder_visit("_relaxed")
def untyped(self):
return self._postorder_visit("_untyped")
def uniquify_variables(self, type_map, renamings={}):
        # Cannot use _postorder_visit because this requires preorder
# for quantified effects.
if not self.parts:
return self
else:
return self.__class__([part.uniquify_variables(type_map, renamings)
for part in self.parts])
def to_untyped_strips(self):
raise ValueError("Not a STRIPS condition: %s" % self.__class__.__name__)
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
raise ValueError("Cannot instantiate condition: not normalized")
def free_variables(self):
result = set()
for part in self.parts:
result |= part.free_variables()
return result
def has_disjunction(self):
for part in self.parts:
if part.has_disjunction():
return True
return False
def has_existential_part(self):
for part in self.parts:
if part.has_existential_part():
return True
return False
def has_universal_part(self):
for part in self.parts:
if part.has_universal_part():
return True
return False
class ConstantCondition(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
parts = ()
def __init__(self):
self.hash = hash(self.__class__)
def change_parts(self, parts):
return self
def __eq__(self, other):
return self.__class__ is other.__class__
class Impossible(Exception):
pass
class Falsity(ConstantCondition):
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
raise Impossible()
def negate(self):
return Truth()
class Truth(ConstantCondition):
def to_untyped_strips(self):
return []
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
pass
def negate(self):
return Falsity()
class JunctorCondition(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.parts == other.parts)
def change_parts(self, parts):
return self.__class__(parts)
class Conjunction(JunctorCondition):
def _simplified(self, parts):
result_parts = []
for part in parts:
if isinstance(part, Conjunction):
result_parts += part.parts
elif isinstance(part, Falsity):
return Falsity()
elif not isinstance(part, Truth):
result_parts.append(part)
if not result_parts:
return Truth()
if len(result_parts) == 1:
return result_parts[0]
return Conjunction(result_parts)
def to_untyped_strips(self):
result = []
for part in self.parts:
result += part.to_untyped_strips()
return result
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
assert not result, "Condition not simplified"
for part in self.parts:
part.instantiate(var_mapping, init_facts, fluent_facts, result)
def negate(self):
return Disjunction([p.negate() for p in self.parts])
class Disjunction(JunctorCondition):
def _simplified(self, parts):
result_parts = []
for part in parts:
if isinstance(part, Disjunction):
result_parts += part.parts
elif isinstance(part, Truth):
return Truth()
elif not isinstance(part, Falsity):
result_parts.append(part)
if not result_parts:
return Falsity()
if len(result_parts) == 1:
return result_parts[0]
return Disjunction(result_parts)
def negate(self):
return Conjunction([p.negate() for p in self.parts])
def has_disjunction(self):
return True
class QuantifiedCondition(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
def __init__(self, parameters, parts):
self.parameters = tuple(parameters)
self.parts = tuple(parts)
self.hash = hash((self.__class__, self.parameters, self.parts))
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.parameters == other.parameters and
self.parts == other.parts)
def _dump(self, indent=" "):
arglist = ", ".join(map(str, self.parameters))
return "%s %s" % (self.__class__.__name__, arglist)
def _simplified(self, parts):
if isinstance(parts[0], ConstantCondition):
return parts[0]
else:
return self._propagate(parts)
def uniquify_variables(self, type_map, renamings={}):
renamings = dict(renamings) # Create a copy.
new_parameters = [par.uniquify_name(type_map, renamings)
for par in self.parameters]
new_parts = (self.parts[0].uniquify_variables(type_map, renamings),)
return self.__class__(new_parameters, new_parts)
def free_variables(self):
result = Condition.free_variables(self)
for par in self.parameters:
result.discard(par.name)
return result
def change_parts(self, parts):
return self.__class__(self.parameters, parts)
class UniversalCondition(QuantifiedCondition):
def _untyped(self, parts):
type_literals = [par.get_atom().negate() for par in self.parameters]
return UniversalCondition(self.parameters,
[Disjunction(type_literals + parts)])
def negate(self):
return ExistentialCondition(self.parameters, [p.negate() for p in self.parts])
def has_universal_part(self):
return True
class ExistentialCondition(QuantifiedCondition):
def _untyped(self, parts):
type_literals = [par.get_atom() for par in self.parameters]
return ExistentialCondition(self.parameters,
[Conjunction(type_literals + parts)])
def negate(self):
return UniversalCondition(self.parameters, [p.negate() for p in self.parts])
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
assert not result, "Condition not simplified"
self.parts[0].instantiate(var_mapping, init_facts, fluent_facts, result)
def has_existential_part(self):
return True
class Literal(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
parts = []
__slots__ = ["predicate", "args", "hash"]
def __init__(self, predicate, args):
self.predicate = predicate
self.args = tuple(args)
self.hash = hash((self.__class__, self.predicate, self.args))
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.predicate == other.predicate and
self.args == other.args)
def __ne__(self, other):
return not self == other
@property
def key(self):
return str(self.predicate), self.args
def __lt__(self, other):
return self.key < other.key
def __le__(self, other):
return self.key <= other.key
def __str__(self):
return "%s %s(%s)" % (self.__class__.__name__, self.predicate,
", ".join(map(str, self.args)))
def __repr__(self):
return '<%s>' % self
def _dump(self):
return str(self)
def change_parts(self, parts):
return self
def uniquify_variables(self, type_map, renamings={}):
return self.rename_variables(renamings)
def rename_variables(self, renamings):
new_args = tuple(renamings.get(arg, arg) for arg in self.args)
return self.__class__(self.predicate, new_args)
def replace_argument(self, position, new_arg):
new_args = list(self.args)
new_args[position] = new_arg
return self.__class__(self.predicate, new_args)
def free_variables(self):
return {arg for arg in self.args if arg[0] == "?"}
class Atom(Literal):
negated = False
def to_untyped_strips(self):
return [self]
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
args = [var_mapping.get(arg, arg) for arg in self.args]
atom = Atom(self.predicate, args)
if atom in fluent_facts:
result.append(atom)
elif atom not in init_facts:
raise Impossible()
def negate(self):
return NegatedAtom(self.predicate, self.args)
def positive(self):
return self
class NegatedAtom(Literal):
negated = True
def _relaxed(self, parts):
return Truth()
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
args = [var_mapping.get(arg, arg) for arg in self.args]
atom = Atom(self.predicate, args)
if atom in fluent_facts:
result.append(NegatedAtom(self.predicate, args))
elif atom in init_facts:
raise Impossible()
def negate(self):
return Atom(self.predicate, self.args)
positive = negate
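# A minimal usage sketch (not part of the original module): simplification
# folds constant conditions away, and negate() pushes negation inward
# (De Morgan), keeping conditions in negation normal form.
_atom = Atom("on", ("?x", "?y"))
assert Conjunction([_atom, Truth()]).simplified() == _atom
assert Disjunction([Falsity(), _atom]).simplified() == _atom
assert Conjunction([_atom]).negate() == Disjunction([_atom.negate()])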
| 11,179 | 29.380435 | 86 | py |
DAAISy | DAAISy-main/src/utils/translate/pddl_parser/lisp_parser.py | __all__ = ["ParseError", "parse_nested_list"]
class ParseError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
# Basic functions for parsing PDDL (Lisp) files.
def parse_nested_list(input_file):
tokens = tokenize(input_file)
next_token = next(tokens)
if next_token != "(":
raise ParseError("Expected '(', got %s." % next_token)
result = list(parse_list_aux(tokens))
for tok in tokens: # Check that generator is exhausted.
raise ParseError("Unexpected token: %s." % tok)
return result
def tokenize(input):
for line in input:
line = line.split(";", 1)[0] # Strip comments.
try:
line.encode("ascii")
except UnicodeEncodeError:
raise ParseError("Non-ASCII character outside comment: %s" %
line[0:-1])
line = line.replace("(", " ( ").replace(")", " ) ").replace("?", " ?")
for token in line.split():
yield token.lower()
def parse_list_aux(tokenstream):
# Leading "(" has already been swallowed.
while True:
try:
token = next(tokenstream)
except StopIteration:
raise ParseError("Missing ')'")
if token == ")":
return
elif token == "(":
yield list(parse_list_aux(tokenstream))
else:
yield token
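# A minimal usage sketch (not part of the original module): tokenize()
# iterates over lines, so any iterable of strings works, e.g. io.StringIO.
import io
assert parse_nested_list(io.StringIO("(define (domain blocks))")) == \
    ["define", ["domain", "blocks"]]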
| 1,427 | 27.56 | 78 | py |
DAAISy | DAAISy-main/src/utils/translate/pddl_parser/pddl_file.py | from ...translate import options
from . import lisp_parser
from . import parsing_functions
file_open = open
def parse_pddl_file(type, filename):
try:
# The builtin open function is shadowed by this module's open function.
# We use the Latin-1 encoding (which allows a superset of ASCII, of the
# Latin-* encodings and of UTF-8) to allow special characters in
# comments. In all other parts, we later validate that only ASCII is
# used.
return lisp_parser.parse_nested_list(file_open(filename,
encoding='ISO-8859-1'))
except OSError as e:
raise SystemExit("Error: Could not read file: %s\nReason: %s." %
(e.filename, e))
except lisp_parser.ParseError as e:
raise SystemExit("Error: Could not parse %s file: %s\nReason: %s." %
(type, filename, e))
def open(domain_filename=None, task_filename=None):
task_filename = task_filename or options.task
domain_filename = domain_filename or options.domain
domain_pddl = parse_pddl_file("domain", domain_filename)
task_pddl = parse_pddl_file("task", task_filename)
return parsing_functions.parse_task(domain_pddl, task_pddl)
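# A minimal usage sketch (hypothetical file paths): parse a domain/problem
# pair into a pddl.Task.
# task = open(domain_filename="domain.pddl", task_filename="problem.pddl")
# task.dump()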
| 1,272 | 37.575758 | 79 | py |
DAAISy | DAAISy-main/src/utils/translate/pddl_parser/__init__.py | from .pddl_file import open
from .parsing_functions import *
| 61 | 19.666667 | 32 | py |
DAAISy | DAAISy-main/src/utils/translate/pddl_parser/parsing_functions.py | import sys
from .. import graph
from .. import pddl_fd as pddl
def parse_typed_list(alist, only_variables=False,
constructor=pddl.TypedObject,
default_type="object"):
result = []
while alist:
try:
separator_position = alist.index("-")
except ValueError:
items = alist
_type = default_type
alist = []
else:
items = alist[:separator_position]
_type = alist[separator_position + 1]
alist = alist[separator_position + 2:]
for item in items:
assert not only_variables or item.startswith("?"), \
"Expected item to be a variable: %s in (%s)" % (
item, " ".join(items))
entry = constructor(item, _type)
result.append(entry)
return result
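# A minimal usage sketch (not part of the original module): the token list
# below corresponds to the PDDL fragment "?x ?y - block ?z - table".
_params = parse_typed_list(["?x", "?y", "-", "block", "?z", "-", "table"])
assert [(p.name, p.type_name) for p in _params] == [
    ("?x", "block"), ("?y", "block"), ("?z", "table")]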
def set_supertypes(type_list):
# TODO: This is a two-stage construction, which is perhaps
# not a great idea. Might need more thought in the future.
type_name_to_type = {}
child_types = []
for type in type_list:
type.supertype_names = []
type_name_to_type[type.name] = type
if type.basetype_name:
child_types.append((type.name, type.basetype_name))
for (desc_name, anc_name) in graph.transitive_closure(child_types):
type_name_to_type[desc_name].supertype_names.append(anc_name)
def parse_predicate(alist):
name = alist[0]
arguments = parse_typed_list(alist[1:], only_variables=True)
return pddl.Predicate(name, arguments)
def parse_function(alist, type_name):
name = alist[0]
arguments = parse_typed_list(alist[1:])
return pddl.Function(name, arguments, type_name)
def parse_condition(alist, type_dict, predicate_dict):
condition = parse_condition_aux(alist, False, type_dict, predicate_dict)
return condition.uniquify_variables({}).simplified()
def parse_condition_aux(alist, negated, type_dict, predicate_dict):
"""Parse a PDDL condition. The condition is translated into NNF on the fly."""
tag = alist[0]
if tag in ("and", "or", "not", "imply"):
args = alist[1:]
if tag == "imply":
assert len(args) == 2
if tag == "not":
assert len(args) == 1
return parse_condition_aux(
args[0], not negated, type_dict, predicate_dict)
elif tag in ("forall", "exists"):
parameters = parse_typed_list(alist[1])
args = alist[2:]
assert len(args) == 1
else:
return parse_literal(alist, type_dict, predicate_dict, negated=negated)
if tag == "imply":
parts = [parse_condition_aux(
args[0], not negated, type_dict, predicate_dict),
parse_condition_aux(
args[1], negated, type_dict, predicate_dict)]
tag = "or"
else:
parts = [parse_condition_aux(part, negated, type_dict, predicate_dict)
for part in args]
if tag == "and" and not negated or tag == "or" and negated:
return pddl.Conjunction(parts)
elif tag == "or" and not negated or tag == "and" and negated:
return pddl.Disjunction(parts)
elif tag == "forall" and not negated or tag == "exists" and negated:
return pddl.UniversalCondition(parameters, parts)
elif tag == "exists" and not negated or tag == "forall" and negated:
return pddl.ExistentialCondition(parameters, parts)
def parse_literal(alist, type_dict, predicate_dict, negated=False):
if alist[0] == "not":
assert len(alist) == 2
alist = alist[1]
negated = not negated
pred_id, arity = _get_predicate_id_and_arity(
alist[0], type_dict, predicate_dict)
if arity != len(alist) - 1:
raise SystemExit("predicate used with wrong arity: (%s)"
% " ".join(alist))
if negated:
return pddl.NegatedAtom(pred_id, alist[1:])
else:
return pddl.Atom(pred_id, alist[1:])
SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH = False
def _get_predicate_id_and_arity(text, type_dict, predicate_dict):
global SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH
the_type = type_dict.get(text)
the_predicate = predicate_dict.get(text)
if the_type is None and the_predicate is None:
raise SystemExit("Undeclared predicate: %s" % text)
elif the_predicate is not None:
if the_type is not None and not SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH:
msg = ("Warning: name clash between type and predicate %r.\n"
"Interpreting as predicate in conditions.") % text
print(msg, file=sys.stderr)
SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH = True
return the_predicate.name, the_predicate.get_arity()
else:
assert the_type is not None
return the_type.get_predicate_name(), 1
def parse_effects(alist, result, type_dict, predicate_dict):
"""Parse a PDDL effect (any combination of simple, conjunctive, conditional, and universal)."""
tmp_effect = parse_effect(alist, type_dict, predicate_dict)
normalized = tmp_effect.normalize()
cost_eff, rest_effect = normalized.extract_cost()
add_effect(rest_effect, result)
if cost_eff:
return cost_eff.effect
else:
return None
def add_effect(tmp_effect, result):
"""tmp_effect has the following structure:
[ConjunctiveEffect] [UniversalEffect] [ConditionalEffect] SimpleEffect."""
if isinstance(tmp_effect, pddl.ConjunctiveEffect):
for effect in tmp_effect.effects:
add_effect(effect, result)
return
else:
parameters = []
condition = pddl.Truth()
if isinstance(tmp_effect, pddl.UniversalEffect):
parameters = tmp_effect.parameters
if isinstance(tmp_effect.effect, pddl.ConditionalEffect):
condition = tmp_effect.effect.condition
assert isinstance(tmp_effect.effect.effect, pddl.SimpleEffect)
effect = tmp_effect.effect.effect.effect
else:
assert isinstance(tmp_effect.effect, pddl.SimpleEffect)
effect = tmp_effect.effect.effect
elif isinstance(tmp_effect, pddl.ConditionalEffect):
condition = tmp_effect.condition
assert isinstance(tmp_effect.effect, pddl.SimpleEffect)
effect = tmp_effect.effect.effect
else:
assert isinstance(tmp_effect, pddl.SimpleEffect)
effect = tmp_effect.effect
assert isinstance(effect, pddl.Literal)
# Check for contradictory effects
condition = condition.simplified()
new_effect = pddl.Effect(parameters, condition, effect)
contradiction = pddl.Effect(parameters, condition, effect.negate())
if contradiction not in result:
result.append(new_effect)
else:
# We use add-after-delete semantics, keep positive effect
if isinstance(contradiction.literal, pddl.NegatedAtom):
result.remove(contradiction)
result.append(new_effect)
def parse_effect(alist, type_dict, predicate_dict):
tag = alist[0]
if tag == "and":
return pddl.ConjunctiveEffect(
[parse_effect(eff, type_dict, predicate_dict) for eff in alist[1:]])
elif tag == "forall":
assert len(alist) == 3
parameters = parse_typed_list(alist[1])
effect = parse_effect(alist[2], type_dict, predicate_dict)
return pddl.UniversalEffect(parameters, effect)
elif tag == "when":
assert len(alist) == 3
condition = parse_condition(
alist[1], type_dict, predicate_dict)
effect = parse_effect(alist[2], type_dict, predicate_dict)
return pddl.ConditionalEffect(condition, effect)
elif tag == "increase":
assert len(alist) == 3
assert alist[1] == ['total-cost']
assignment = parse_assignment(alist)
return pddl.CostEffect(assignment)
else:
# We pass in {} instead of type_dict here because types must
# be static predicates, so cannot be the target of an effect.
return pddl.SimpleEffect(parse_literal(alist, {}, predicate_dict))
def parse_expression(exp):
if isinstance(exp, list):
functionsymbol = exp[0]
return pddl.PrimitiveNumericExpression(functionsymbol, exp[1:])
elif exp.replace(".", "").isdigit():
return pddl.NumericConstant(float(exp))
elif exp[0] == "-":
raise ValueError("Negative numbers are not supported")
else:
return pddl.PrimitiveNumericExpression(exp, [])
def parse_assignment(alist):
assert len(alist) == 3
op = alist[0]
head = parse_expression(alist[1])
exp = parse_expression(alist[2])
if op == "=":
return pddl.Assign(head, exp)
elif op == "increase":
return pddl.Increase(head, exp)
else:
assert False, "Assignment operator not supported."
def parse_action(alist, type_dict, predicate_dict):
iterator = iter(alist)
action_tag = next(iterator)
assert action_tag == ":action"
name = next(iterator)
parameters_tag_opt = next(iterator)
if parameters_tag_opt == ":parameters":
parameters = parse_typed_list(next(iterator),
only_variables=True)
precondition_tag_opt = next(iterator)
else:
parameters = []
precondition_tag_opt = parameters_tag_opt
if precondition_tag_opt == ":precondition":
precondition_list = next(iterator)
if not precondition_list:
# Note that :precondition () is allowed in PDDL.
precondition = pddl.Conjunction([])
else:
precondition = parse_condition(
precondition_list, type_dict, predicate_dict)
effect_tag = next(iterator)
else:
precondition = pddl.Conjunction([])
effect_tag = precondition_tag_opt
assert effect_tag == ":effect"
effect_list = next(iterator)
eff = []
if effect_list:
try:
cost = parse_effects(
effect_list, eff, type_dict, predicate_dict)
except ValueError as e:
raise SystemExit("Error in Action %s\nReason: %s." % (name, e))
for rest in iterator:
assert False, rest
if eff:
return pddl.Action(name, parameters, len(parameters),
precondition, eff, cost)
else:
return None
def parse_axiom(alist, type_dict, predicate_dict):
assert len(alist) == 3
assert alist[0] == ":derived"
predicate = parse_predicate(alist[1])
condition = parse_condition(
alist[2], type_dict, predicate_dict)
return pddl.Axiom(predicate.name, predicate.arguments,
len(predicate.arguments), condition)
def parse_task(domain_pddl, task_pddl):
domain_name, domain_requirements, types, type_dict, constants, predicates, predicate_dict, functions, actions, axioms \
= parse_domain_pddl(domain_pddl)
task_name, task_domain_name, task_requirements, objects, init, goal, use_metric = parse_task_pddl(task_pddl,
type_dict,
predicate_dict)
assert domain_name == task_domain_name
requirements = pddl.Requirements(sorted(set(
domain_requirements.requirements +
task_requirements.requirements)))
objects = constants + objects
check_for_duplicates(
[o.name for o in objects],
errmsg="error: duplicate object %r",
finalmsg="please check :constants and :objects definitions")
init += [pddl.Atom("=", (obj.name, obj.name)) for obj in objects]
return pddl.Task(
domain_name, task_name, requirements, types, objects,
predicates, functions, init, goal, actions, axioms, use_metric)
def parse_domain_pddl(domain_pddl):
iterator = iter(domain_pddl)
define_tag = next(iterator)
assert define_tag == "define"
domain_line = next(iterator)
assert domain_line[0] == "domain" and len(domain_line) == 2
yield domain_line[1]
## We allow an arbitrary order of the requirement, types, constants,
## predicates and functions specification. The PDDL BNF is more strict on
## this, so we print a warning if it is violated.
requirements = pddl.Requirements([":strips"])
the_types = [pddl.Type("object")]
constants, the_predicates, the_functions = [], [], []
correct_order = [":requirements", ":types", ":constants", ":predicates",
":functions"]
seen_fields = []
first_action = None
for opt in iterator:
field = opt[0]
if field not in correct_order:
first_action = opt
break
if field in seen_fields:
raise SystemExit("Error in domain specification\n" +
"Reason: two '%s' specifications." % field)
if (seen_fields and
correct_order.index(seen_fields[-1]) > correct_order.index(field)):
msg = "\nWarning: %s specification not allowed here (cf. PDDL BNF)" % field
print(msg, file=sys.stderr)
seen_fields.append(field)
if field == ":requirements":
requirements = pddl.Requirements(opt[1:])
elif field == ":types":
the_types.extend(parse_typed_list(
opt[1:], constructor=pddl.Type))
elif field == ":constants":
constants = parse_typed_list(opt[1:])
elif field == ":predicates":
the_predicates = [parse_predicate(entry)
for entry in opt[1:]]
the_predicates += [pddl.Predicate("=", [
pddl.TypedObject("?x", "object"),
pddl.TypedObject("?y", "object")])]
elif field == ":functions":
the_functions = parse_typed_list(
opt[1:],
constructor=parse_function,
default_type="number")
set_supertypes(the_types)
yield requirements
yield the_types
type_dict = {type.name: type for type in the_types}
yield type_dict
yield constants
yield the_predicates
predicate_dict = {pred.name: pred for pred in the_predicates}
yield predicate_dict
yield the_functions
entries = []
if first_action is not None:
entries.append(first_action)
entries.extend(iterator)
the_axioms = []
the_actions = []
for entry in entries:
if entry[0] == ":derived":
axiom = parse_axiom(entry, type_dict, predicate_dict)
the_axioms.append(axiom)
else:
action = parse_action(entry, type_dict, predicate_dict)
if action is not None:
the_actions.append(action)
yield the_actions
yield the_axioms
def parse_task_pddl(task_pddl, type_dict, predicate_dict):
iterator = iter(task_pddl)
define_tag = next(iterator)
assert define_tag == "define"
problem_line = next(iterator)
assert problem_line[0] == "problem" and len(problem_line) == 2
yield problem_line[1]
domain_line = next(iterator)
assert domain_line[0] == ":domain" and len(domain_line) == 2
yield domain_line[1]
requirements_opt = next(iterator)
if requirements_opt[0] == ":requirements":
requirements = requirements_opt[1:]
objects_opt = next(iterator)
else:
requirements = []
objects_opt = requirements_opt
yield pddl.Requirements(requirements)
if objects_opt[0] == ":objects":
yield parse_typed_list(objects_opt[1:])
init = next(iterator)
else:
yield []
init = objects_opt
assert init[0] == ":init"
initial = []
initial_true = set()
initial_false = set()
initial_assignments = dict()
for fact in init[1:]:
if fact[0] == "=":
try:
assignment = parse_assignment(fact)
except ValueError as e:
raise SystemExit("Error in initial state specification\n" +
"Reason: %s." % e)
if not isinstance(assignment.expression,
pddl.NumericConstant):
raise SystemExit("Illegal assignment in initial state " +
"specification:\n%s" % assignment)
if assignment.fluent in initial_assignments:
prev = initial_assignments[assignment.fluent]
if assignment.expression == prev.expression:
print("Warning: %s is specified twice" % assignment,
"in initial state specification")
else:
raise SystemExit("Error in initial state specification\n" +
"Reason: conflicting assignment for " +
"%s." % assignment.fluent)
else:
initial_assignments[assignment.fluent] = assignment
initial.append(assignment)
elif fact[0] == "not":
atom = pddl.Atom(fact[1][0], fact[1][1:])
check_atom_consistency(atom, initial_false, initial_true, False)
initial_false.add(atom)
else:
atom = pddl.Atom(fact[0], fact[1:])
check_atom_consistency(atom, initial_true, initial_false)
initial_true.add(atom)
initial.extend(initial_true)
yield initial
goal = next(iterator)
assert goal[0] == ":goal" and len(goal) == 2
yield parse_condition(goal[1], type_dict, predicate_dict)
use_metric = False
for entry in iterator:
if entry[0] == ":metric":
if entry[1] == "minimize" and entry[2][0] == "total-cost":
use_metric = True
else:
assert False, "Unknown metric."
yield use_metric
for entry in iterator:
assert False, entry
def check_atom_consistency(atom, same_truth_value, other_truth_value, atom_is_true=True):
if atom in other_truth_value:
raise SystemExit("Error in initial state specification\n" +
"Reason: %s is true and false." % atom)
if atom in same_truth_value:
if not atom_is_true:
atom = atom.negate()
print("Warning: %s is specified twice in initial state specification" % atom)
def check_for_duplicates(elements, errmsg, finalmsg):
seen = set()
errors = []
for element in elements:
if element in seen:
errors.append(errmsg % element)
else:
seen.add(element)
if errors:
raise SystemExit("\n".join(errors) + "\n" + finalmsg)
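# A minimal usage sketch (not part of the original module): conditions are
# translated into negation normal form while parsing.
_on = pddl.Predicate("on", [pddl.TypedObject("?x", "object"),
                            pddl.TypedObject("?y", "object")])
_cond = parse_condition(["not", ["on", "?a", "?b"]], {}, {"on": _on})
assert _cond == pddl.NegatedAtom("on", ("?a", "?b"))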
| 18,854 | 36.485089 | 123 | py |
DAAISy | DAAISy-main/src/utils/parser/parser.py | """PDDL parsing.
"""
from .structs import (Type, Predicate, LiteralConjunction, LiteralDisjunction,
Not, Anti, ForAll, Exists, TypedEntity, ground_literal)
import re
class Operator:
"""Class to hold an operator.
"""
def __init__(self, name, params, preconds, effects):
self.name = name # string
        self.params = params # list of structs.TypedEntity objects
self.preconds = preconds # structs.Literal representing preconditions
self.effects = effects # structs.Literal representing effects
def __str__(self):
s = self.name + "(" + ",".join(self.params) + "): "
s += " & ".join(map(str, self.preconds.literals))
s += " => "
s += " & ".join(map(str, self.effects.literals))
return s
def pddl_str(self):
param_strs = [str(param).replace(":", " - ") for param in self.params]
s = "\n\n\t(:action {}".format(self.name)
s += "\n\t\t:parameters ({})".format(" ".join(param_strs))
preconds_pddl_str = self._create_preconds_pddl_str(self.preconds)
s += "\n\t\t:precondition (and {})".format(preconds_pddl_str)
indented_effs = self.effects.pddl_str().replace("\n", "\n\t\t")
s += "\n\t\t:effect {}".format(indented_effs)
s += "\n\t)"
return s
def _create_preconds_pddl_str(self, preconds):
all_params = set()
precond_strs = []
for term in preconds.literals:
params = set(map(str, term.variables))
if term.negated_as_failure:
# Negative term. The variables to universally
# quantify over are those which we have not
# encountered yet in this clause.
universally_quantified_vars = list(sorted(
params-all_params))
precond = ""
for var in universally_quantified_vars:
precond += "(forall ({}) ".format(
var.replace(":", " - "))
precond += "(or "
for var in universally_quantified_vars:
var_cleaned = "?"+var[:var.find(":")]
for param in list(sorted(all_params)):
param_cleaned = "?"+param[:param.find(":")]
precond += "(not (Different {} {})) ".format(
param_cleaned, var_cleaned)
precond += "(not {}))".format(term.positive.pddl_str())
for var in universally_quantified_vars:
precond += ")"
precond_strs.append(precond)
else:
# Positive term.
all_params.update(params)
precond_strs.append(term.pddl_str())
return "\n\t\t\t".join(precond_strs)
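# A minimal usage sketch (not part of the original module): building an
# Operator by hand; pddl_str() emits the corresponding (:action ...) block.
_t = Type("default")
_clear = Predicate("clear", 1, var_types=[_t])
_on = Predicate("on", 2, var_types=[_t, _t])
_stack = Operator("stack", [_t("?x"), _t("?y")],
                  LiteralConjunction([_clear("?y")]),
                  LiteralConjunction([_on("?x", "?y"), Anti(_clear("?y"))]))
assert "(:action stack" in _stack.pddl_str()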
class PDDLParser:
"""PDDL parsing class.
"""
def _parse_into_literal(self, string, params, is_effect=False):
"""Parse the given string (representing either preconditions or effects)
into a literal. Check against params to make sure typing is correct.
"""
assert string[0] == "("
assert string[-1] == ")"
if string.startswith("(and") and string[4] in (" ", "\n", "("):
clauses = self._find_all_balanced_expressions(string[4:-1].strip())
return LiteralConjunction([self._parse_into_literal(clause, params,
is_effect=is_effect) for clause in clauses])
if string.startswith("(or") and string[3] in (" ", "\n", "("):
clauses = self._find_all_balanced_expressions(string[3:-1].strip())
return LiteralDisjunction([self._parse_into_literal(clause, params,
is_effect=is_effect) for clause in clauses])
if string.startswith("(forall") and string[7] in (" ", "\n", "("):
new_binding, clause = self._find_all_balanced_expressions(
string[7:-1].strip())
new_name, new_type_name = new_binding.strip()[1:-1].split("-")
new_name = new_name.strip()
new_type_name = new_type_name.strip()
assert new_name not in params, "ForAll variable {} already exists".format(new_name)
params[new_name] = self.types[new_type_name]
result = ForAll(self._parse_into_literal(clause, params, is_effect=is_effect),
TypedEntity(new_name, params[new_name]))
del params[new_name]
return result
if string.startswith("(exists") and string[7] in (" ", "\n", "("):
new_binding, clause = self._find_all_balanced_expressions(
string[7:-1].strip())
variables = self._parse_objects(new_binding[1:-1])
for v in variables:
params[v.name] = v.var_type
body = self._parse_into_literal(clause, params, is_effect=is_effect)
result = Exists(variables, body)
for v in variables:
del params[v.name]
return result
if string.startswith("(not") and string[4] in (" ", "\n", "("):
clause = string[4:-1].strip()
if is_effect:
return Anti(self._parse_into_literal(clause, params, is_effect=is_effect))
else:
return Not(self._parse_into_literal(clause, params, is_effect=is_effect))
string = string[1:-1].split()
pred, args = string[0], string[1:]
# Validate types against the given params dict.
assert pred in self.predicates, "Predicate {} is not defined".format(pred)
assert self.predicates[pred].arity == len(args), pred
for i, arg in enumerate(args):
            assert arg in params, "Argument {} is not in the params".format(arg)
return self.predicates[pred](*args)
def _parse_objects(self, objects):
if objects.find("\n") != -1:
objects = objects.split("\n")
elif self.uses_typing:
# Must be one object then; assumes that typed objects are new-line separated
assert objects.count(" - ") == 1
objects = [objects]
else:
# Space-separated
objects = objects.split()
to_return = set()
for obj in objects:
if self.uses_typing:
obj_name, obj_type_name = obj.strip().split(" - ")
obj_name = obj_name.strip()
obj_type_name = obj_type_name.strip()
else:
obj_name = obj.strip()
if " - " in obj_name:
obj_name, temp = obj_name.split(" - ")
obj_name = obj_name.strip()
assert temp == "default"
obj_type_name = "default"
if obj_type_name not in self.types:
print("Warning: type not declared for object {}, type {}".format(
obj_name, obj_type_name))
obj_type = Type(obj_type_name)
else:
obj_type = self.types[obj_type_name]
to_return.add(TypedEntity(obj_name, obj_type))
return sorted(to_return)
def _purge_comments(self, pddl_str):
# Purge comments from the given string.
while True:
match = re.search(r";(.*)\n", pddl_str)
if match is None:
return pddl_str
start, end = match.start(), match.end()
pddl_str = pddl_str[:start]+pddl_str[end-1:]
@staticmethod
def _find_balanced_expression(string, index):
"""Find balanced expression in string starting from given index.
"""
assert string[index] == "("
start_index = index
balance = 1
while balance != 0:
index += 1
symbol = string[index]
if symbol == "(":
balance += 1
elif symbol == ")":
balance -= 1
return string[start_index:index+1]
@staticmethod
def _find_all_balanced_expressions(string):
"""Return a list of all balanced expressions in a string,
starting from the beginning.
"""
assert string[0] == "("
assert string[-1] == ")"
exprs = []
index = 0
start_index = index
balance = 1
while index < len(string)-1:
index += 1
if balance == 0:
exprs.append(string[start_index:index])
# Jump to next "(".
while True:
if string[index] == "(":
break
index += 1
start_index = index
balance = 1
continue
symbol = string[index]
if symbol == "(":
balance += 1
elif symbol == ")":
balance -= 1
assert balance == 0
exprs.append(string[start_index:index+1])
return exprs
class PDDLDomainParser(PDDLParser):
"""PDDL domain parsing class.
"""
def __init__(self, domain_fname, expect_action_preds=True):
self.domain_fname = domain_fname
## These are the things that we will construct.
# String of domain name.
self.domain_name = None
# Dict from type name -> structs.Type object.
self.types = None
self.uses_typing = None
# Dict from predicate name -> structs.Predicate object.
self.predicates = None
# Dict from operator name -> Operator object (class defined above).
self.operators = None
# Read files.
with open(domain_fname, "r") as f:
self.domain = f.read().lower()
# Get action predicate names (not part of standard PDDL)
if not expect_action_preds:
self.actions = set()
else:
self.actions = self._parse_actions()
# Remove comments.
self.domain = self._purge_comments(self.domain)
assert ";" not in self.domain
# Run parsing.
self._parse_domain()
def _parse_actions(self):
start_ind = re.search(r"\(:action", self.domain).start()
actions = self._find_balanced_expression(self.domain, start_ind)
actions = actions[9:-1].strip()
return set(actions.split())
def _parse_domain(self):
patt = r"\(domain(.*?)\)"
self.domain_name = re.search(patt, self.domain).groups()[0].strip()
self._parse_domain_types()
self._parse_domain_predicates()
self._parse_domain_operators()
def _parse_domain_types(self):
match = re.search(r"\(:types", self.domain)
if not match:
self.types = {"default": Type("default")}
self.uses_typing = False
return
self.uses_typing = True
start_ind = match.start()
types = self._find_balanced_expression(self.domain, start_ind)
types = types[7:-1].split()
self.types = {type_name: Type(type_name) for type_name in types}
def _parse_domain_predicates(self):
start_ind = re.search(r"\(:predicates", self.domain).start()
predicates = self._find_balanced_expression(self.domain, start_ind)
predicates = predicates[12:-1].strip()
predicates = self._find_all_balanced_expressions(predicates)
self.predicates = {}
for pred in predicates:
pred = pred.strip()[1:-1].split("?")
pred_name = pred[0].strip()
# arg_types = [self.types[arg.strip().split("-")[1].strip()]
# for arg in pred[1:]]
arg_types = []
for arg in pred[1:]:
if ' - ' in arg:
assert arg_types is not None, "Mixing of typed and untyped args not allowed"
assert self.uses_typing
arg_type = self.types[arg.strip().split("-")[1].strip()]
arg_types.append(arg_type)
else:
assert not self.uses_typing
arg_types.append(self.types["default"])
self.predicates[pred_name] = Predicate(
pred_name, len(pred[1:]), arg_types)
def _parse_domain_operators(self):
matches = re.finditer(r"\(:action", self.domain)
self.operators = {}
for match in matches:
start_ind = match.start()
op = self._find_balanced_expression(self.domain, start_ind).strip()
patt = r"\(:action(.*):parameters(.*):precondition(.*):effect(.*)\)"
op_match = re.match(patt, op, re.DOTALL)
op_name, params, preconds, effects = op_match.groups()
op_name = op_name.strip()
params = params.strip()[1:-1].split("?")
if self.uses_typing:
params = [(param.strip().split("-")[0].strip(),
param.strip().split("-")[1].strip())
for param in params[1:]]
params = [self.types[v]("?"+k) for k, v in params]
else:
params = [param.strip() for param in params[1:]]
params = [self.types["default"]("?"+k) for k in params]
preconds = self._parse_into_literal(preconds.strip(), params)
effects = self._parse_into_literal(effects.strip(), params, is_effect=True)
self.operators[op_name] = Operator(op_name, params, preconds, effects)
def write(self, fname):
"""Write the domain PDDL string to a file.
"""
predicates = "\n\t".join([lit.pddl_str() for lit in self.predicates.values()])
operators = "\n\t".join([op.pddl_str() for op in self.operators.values()])
domain_str = """
(define (domain {})
(:requirements :typing )
(:types {})
(:predicates {}
)
; (:actions {})
{}
)
""".format(self.domain_name, " ".join(self.types),
predicates, " ".join(map(str, self.actions)), operators)
with open(fname, 'w') as f:
f.write(domain_str)
class PDDLProblemParser(PDDLParser):
"""PDDL problem parsing class.
"""
def __init__(self, problem_fname, domain_name, types, predicates):
self.problem_fname = problem_fname
self.domain_name = domain_name
self.types = types
self.predicates = predicates
self.uses_typing = not ("default" in self.types)
self.problem_name = None
# Set of objects, each is a structs.TypedEntity object.
self.objects = None
# Set of fluents in initial state, each is a structs.Literal.
self.initial_state = None
# structs.Literal representing the goal.
self.goal = None
## Read files.
with open(problem_fname, "r") as f:
self.problem = f.read().lower()
self.problem = self._purge_comments(self.problem)
assert ";" not in self.problem
## Run parsing.
self._parse_problem()
def _parse_problem(self):
patt = r"\(problem(.*?)\)"
        if self.problem == '':
            raise ValueError(
                "Empty problem file: {}".format(self.problem_fname))
self.problem_name = re.search(patt, self.problem).groups()[0].strip()
patt = r"\(:domain(.*?)\)"
domain_name = re.search(patt, self.problem).groups()[0].strip()
assert domain_name == self.domain_name, "Problem file doesn't match the domain file!"
self._parse_problem_objects()
self._parse_problem_initial_state()
self._parse_problem_goal()
def _parse_problem_objects(self):
start_ind = re.search(r"\(:objects", self.problem).start()
objects = self._find_balanced_expression(self.problem, start_ind)
objects = objects[9:-1].strip()
self.objects = self._parse_objects(objects)
def _parse_problem_initial_state(self):
start_ind = re.search(r"\(:init", self.problem).start()
init = self._find_balanced_expression(self.problem, start_ind)
fluents = self._find_all_balanced_expressions(init[6:-1].strip())
self.initial_state = set()
params = {obj.name: obj.var_type for obj in self.objects}
for fluent in fluents:
self.initial_state.add(self._parse_into_literal(fluent, params))
def _parse_problem_goal(self):
start_ind = re.search(r"\(:goal", self.problem).start()
goal = self._find_balanced_expression(self.problem, start_ind)
goal = goal[6:-1].strip()
params = {obj.name: obj.var_type for obj in self.objects}
self.goal = self._parse_into_literal(goal, params)
@staticmethod
def create_pddl_file(fname, objects, initial_state, problem_name, domain_name, goal):
"""Get the problem PDDL string for a given state.
"""
objects_typed = "\n\t".join(list(sorted(map(lambda o : str(o).replace(":", " - "),
objects))))
init_state = "\n\t".join([lit.pddl_str() for lit in sorted(initial_state)])
problem_str = """
(define (problem {}) (:domain {})
(:objects
{}
)
(:goal {})
(:init \n\t{}
))
""".format(problem_name, domain_name, objects_typed, goal.pddl_str(), init_state)
with open(fname, 'w') as f:
f.write(problem_str)
def write(self, fname):
"""Get the problem PDDL string for a given state.
"""
return PDDLProblemParser.create_pddl_file(fname, self.objects,
self.initial_state, self.problem_name, self.domain_name, self.goal)
def parse_plan_step(plan_step, operators, action_predicates):
plan_step_split = plan_step.split()
# Get the operator from its name
operator = None
for op in operators:
if op.name.lower() == plan_step_split[0]:
operator = op
break
assert operator is not None, "Unknown operator '{}'".format(plan_step_split[0])
assert len(plan_step_split) == len(operator.params) + 1
args = plan_step_split[1:]
assignments = dict(zip(operator.params, args))
for cond in operator.preconds.literals:
if cond.predicate in action_predicates:
ground_action = ground_literal(cond, assignments)
return ground_action
raise Exception("Unrecognized plan step: `{}`".format(str(plan_step)))
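# A minimal usage sketch (hypothetical file path): parse a domain file and
# inspect its operators.
# parser = PDDLDomainParser("blocks.pddl", expect_action_preds=False)
# print(parser.domain_name, sorted(parser.operators))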
| 18,447 | 38.844492 | 96 | py |
DAAISy | DAAISy-main/src/utils/parser/structs.py | """Python classes for common PDDL structures"""
import itertools
### PDDL Types, Objects, Variables ###
class Type(str):
"""A PDDL type"""
def __call__(self, entity_name):
return TypedEntity.__new__(TypedEntity, entity_name, self)
# Default type
NULLTYPE = Type("null")
class TypedEntity(str):
"""All objects and variables from PDDL are TypedEntitys"""
def __new__(cls, name, var_type):
assert isinstance(var_type, Type)
obj = str.__new__(cls, name)
obj.name = name
obj.var_type = var_type
obj._str = str(obj.name) + ":" + str(obj.var_type)
return obj
def __str__(self):
return self._str
def __repr__(self):
return str(self)
def __hash__(self):
return hash(str(self))
def __add__(self, other):
return str(self) + str(other)
def __radd__(self, other):
return str(other) + str(self)
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
def __getnewargs_ex__(self):
return ((self.name, self.var_type), {})
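# A minimal usage sketch (not part of the original module): a TypedEntity
# compares equal to its bare name string but renders with its type attached.
_block = Type("block")
_a = _block("a")  # Type.__call__ builds a TypedEntity
assert _a == "a" and str(_a) == "a:block"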
### Predicates ###
class Predicate(object):
"""
A Predicate is a factory for Literals.
Parameters
----------
name : str
arity : int
The number of variables in the predicate.
var_types : [ Type ]
The Type of each variable in the predicate.
is_negative : bool
Whether this Predicate is negative (as in a
negative precondition).
is_anti : bool
Whether this Predicate is anti (as in a
negative effect).
"""
def __init__(self, name, arity, var_types=None, is_negative=False, is_anti=False,
negated_as_failure=False):
self.name = name
self.arity = arity
self.var_types = var_types
self.is_negative = is_negative
self.negated_as_failure = negated_as_failure
self.is_anti = is_anti
def __call__(self, *variables):
return Literal(self, list(variables))
def __str__(self):
if self.negated_as_failure:
neg_prefix = '~'
elif self.is_negative:
neg_prefix = "Not"
elif self.is_anti:
neg_prefix = "Anti"
else:
neg_prefix = ""
return neg_prefix + self.name
def __repr__(self):
return str(self)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def __lt__(self, other):
return str(self) < str(other)
def __gt__(self, other):
return str(self) > str(other)
@property
def positive(self):
return self.__class__(self.name, self.arity, self.var_types,
is_anti=self.is_anti)
@property
def negative(self):
return self.__class__(self.name, self.arity, self.var_types, is_negative=True,
is_anti=self.is_anti)
@property
def inverted_anti(self):
assert not self.is_negative
return self.__class__(self.name, self.arity, self.var_types, is_anti=(not self.is_anti))
def negate_as_failure(self):
assert not self.negated_as_failure
return Predicate(self.name, self.arity, self.var_types,
negated_as_failure=True, is_anti=self.is_anti)
def pddl_variables(self):
variables = []
for i, vt in enumerate(self.var_types):
v = "?v{} - {}".format(i, vt)
variables.append(v)
return variables
def pddl_str(self):
if self.is_anti:
return "(not ({} {}))".format(self.inverted_anti, " ".join(
self.pddl_variables()))
if self.is_negative:
return "(not ({} {}))".format(self.positive, " ".join(
self.pddl_variables()))
if self.negated_as_failure:
raise NotImplementedError
return "({} {})".format(self, " ".join(self.pddl_variables()))
### Literals ###
class Literal:
"""A literal is a relation between objects or variables.
Both lifted literals (ones with variables) and ground
literals (ones with objects) are Literals in this code.
Parameters
----------
predicate : Predicate
variables : [ TypedEntity or str ]
"""
def __init__(self, predicate, variables):
self.predicate = predicate
self.variables = variables
self.is_negative = predicate.is_negative
self.is_anti = predicate.is_anti
self.negated_as_failure = predicate.negated_as_failure
# Verify types
if self.predicate.var_types is not None:
for i, (expected_type, var) in enumerate(zip(self.predicate.var_types, self.variables)):
if not hasattr(var, 'var_type'):
# Convert strings
self.variables[i] = expected_type(var)
elif var.var_type != expected_type:
                    raise TypeError(
                        "Argument {} has type {}, expected {}".format(
                            var, var.var_type, expected_type))
def __str__(self):
return str(self.predicate) + '(' + ','.join(map(str, self.variables)) + ')'
def __repr__(self):
return str(self)
def __hash__(self):
return hash(repr(self))
def __eq__(self, other):
return repr(self) == repr(other)
def __lt__(self, other):
return repr(self) < repr(other)
def __gt__(self, other):
return repr(self) > repr(other)
@property
def positive(self):
return self.__class__(self.predicate.positive, [v for v in self.variables])
@property
def negative(self):
return self.__class__(self.predicate.negative, [v for v in self.variables])
@property
def inverted_anti(self):
return self.__class__(self.predicate.inverted_anti, [v for v in self.variables])
def negate_as_failure(self):
if self.negated_as_failure:
return self.positive
naf_predicate = self.predicate.negate_as_failure()
return naf_predicate(*self.variables)
def pddl_variables(self):
return [v.replace("(", "").replace(")", "").replace(",", "")
for v in self.variables]
def pddl_variables_typed(self):
return [str(v).replace("(", "").replace(")", "").replace(",", "").replace(":", " - ")
for v in self.variables]
def pddl_str(self):
if self.is_anti:
return "(not ({} {}))".format(self.predicate.inverted_anti, " ".join(
self.pddl_variables()))
if self.is_negative:
return "(not ({} {}))".format(self.predicate.positive, " ".join(
self.pddl_variables()))
if self.negated_as_failure:
raise NotImplementedError
return "({} {})".format(self.predicate, " ".join(self.pddl_variables()))
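# A minimal usage sketch (not part of the original module): a Predicate acts
# as a factory for Literals; plain string arguments are coerced to
# TypedEntity via var_types.
_b = Type("block")
_on = Predicate("on", 2, var_types=[_b, _b])
_lit = _on("a", "b")
assert str(_lit) == "on(a:block,b:block)"
assert _lit.pddl_str() == "(on a b)"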
class LiteralConjunction:
"""A logical conjunction (AND) of Literals.
Parameters
----------
literals : [ Literal ]
"""
def __init__(self, literals):
self.literals = literals
def pddl_variables(self):
return set().union(*(lit.pddl_variables() for lit in self.literals))
def pddl_variables_typed(self):
return set().union(*(lit.pddl_variables_typed() for lit in self.literals))
def pddl_str(self):
return "(and\n\t{})".format("\n\t".join(
            lit.pddl_str() for lit in self.literals))
def holds(self, state):
for lit in self.literals:
assert not lit.is_anti
if lit in state and lit.is_negative:
return False
if lit not in state and not lit.is_negative:
return False
return True
def __str__(self):
return "AND{}".format(self.literals)
def __repr__(self):
return str(self)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
class LiteralDisjunction:
"""A logical disjunction (OR) of Literals.
Parameters
----------
literals : [ Literal ]
"""
def __init__(self, literals):
self.literals = literals
def pddl_variables(self):
return set().union(*(lit.pddl_variables() for lit in self.literals))
def pddl_variables_typed(self):
return set().union(*(lit.pddl_variables_typed() for lit in self.literals))
def pddl_str(self):
return "(or\n\t{})".format("\n\t".join(
            lit.pddl_str() for lit in self.literals))
def holds(self, state):
return any(lit in state for lit in self.literals)
def __str__(self):
return "OR{}".format(self.literals)
def __repr__(self):
return str(self)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
class ForAll:
"""Represents a ForAll over the given variable in the given literal.
variable is a structs.TypedEntity.
"""
def __init__(self, literal, variables):
if isinstance(variables, str):
variables = [variables]
self.literal = literal
self.variables = variables
def __str__(self):
return "FORALL ({}) : {}".format(self.variables, self.literal)
def __repr__(self):
return str(self)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
class Exists:
    """An existential quantifier (EXISTS) binding the given variables
    in the given literal body.
    """
def __init__(self, variables, literal):
self.variables = variables
self.body = literal
def __str__(self):
return "EXISTS ({}) : {}".format(self.variables, str(self.body))
def __repr__(self):
return str(self)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def pddl_str(self):
body_str = self.body.pddl_str()
var_str = '\n'.join(['{} - {}'.format(v.name, v.var_type) for v in self.variables])
return "(exists ({}) {})".format(var_str, body_str)
### Helpers ###
def Not(x): # pylint:disable=invalid-name
"""Negate a Predicate or Literal."""
if isinstance(x, Predicate):
return Predicate(x.name, x.arity, var_types=x.var_types,
is_negative=(not x.is_negative), is_anti=(x.is_anti))
assert isinstance(x, Literal)
new_predicate = Not(x.predicate)
return new_predicate(*x.variables)
def Anti(x): # pylint:disable=invalid-name
"""Invert a Predicate or Literal effect."""
if isinstance(x, Predicate):
return Predicate(x.name, x.arity, var_types=x.var_types,
is_anti=(not x.is_anti))
assert isinstance(x, Literal)
new_predicate = Anti(x.predicate)
return new_predicate(*x.variables)
def Effect(x): # pylint:disable=invalid-name
"""An effect predicate or literal.
"""
assert not x.negated_as_failure
if isinstance(x, Predicate):
return Predicate("Effect"+x.name, x.arity, var_types=x.var_types,
is_negative=x.is_negative, is_anti=x.is_anti)
assert isinstance(x, Literal)
new_predicate = Effect(x.predicate)
return new_predicate(*x.variables)
def effect_to_literal(literal):
assert isinstance(literal, Literal)
assert literal.predicate.name.startswith("Effect")
non_effect_pred = Predicate(literal.predicate.name[len("Effect"):], literal.predicate.arity,
literal.predicate.var_types, negated_as_failure=literal.predicate.negated_as_failure,
is_negative=literal.predicate.is_negative, is_anti=literal.predicate.is_anti)
return non_effect_pred(*literal.variables)
def ground_literal(lifted_lit, assignments):
"""Given a lifted literal, create a ground
literal with the assignments mapping vars to
objects.
Parameters
----------
lifted_lit : Literal
assignments : { TypedEntity : TypedEntity }
Vars to objects.
Returns
-------
ground_lit : Literal
"""
ground_vars = []
for v in lifted_lit.variables:
arg = assignments[v]
ground_vars.append(arg)
return lifted_lit.predicate(*ground_vars)
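# A minimal usage sketch (not part of the original module): grounding a
# lifted literal via an assignment map from variables to objects.
_bt = Type("block")
_on_pred = Predicate("on", 2, var_types=[_bt, _bt])
_lifted = _on_pred("?x", "?y")
_ground = ground_literal(_lifted, {_lifted.variables[0]: "a",
                                   _lifted.variables[1]: "b"})
assert _ground.pddl_str() == "(on a b)"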
| 12,029 | 27.987952 | 100 | py |
DAAISy | DAAISy-main/src/utils/parser/__init__.py | from .structs import *
from .parser import PDDLDomainParser
| 60 | 19.333333 | 36 | py |
DAAISy | DAAISy-main/src/interrogation/aia.py | #!/usr/local/bin/python3
# encoding: utf-8
import copy
import sys
import importlib
import itertools
import pickle
import pprint
import time
import subprocess
from collections import Counter, OrderedDict
from itertools import combinations
import random
from ..config import *
from ..query import ExecutePlan
from ..lattice import LatticeNode, Lattice, State
from ..utils import *
class AgentInterrogation:
"""
:param agent: actual agent model
:type agent: object of class Model
:param abstract_model: model at the most abstracted node in the lattice
:type abstract_model: object of class Model
    :param objects: Objects of the problem, keyed by type
    :type objects: dict mapping type name to list of object names
:param domain_name: Name of the domain
:type domain_name: String
    :param abstract_predicates: predicates appearing in the abstract model
    :param pred_type_mapping: dict mapping each predicate name to the list of its argument types
    :param action_parameters: dict mapping each action name to its parameter types
    :param types: object types appearing in the domain
"""
def __init__(self, agent, abstract_model, objects, domain_name,
abstract_predicates, pred_type_mapping, action_parameters, types):
self.agent = agent
self.abstract_model = abstract_model
self.objects = objects
self.domain_name = domain_name
self.abstract_predicates = abstract_predicates
self.pred_type_mapping = pred_type_mapping
self.action_parameters = action_parameters
self.types = types
self.location = Location.ALL
self.difficult_pal_tuple = []
self.queries = {}
self.PALtuples_fixed_num = 0
self.failed_plans = []
self.agent_query_total = 0
self.query_old = 0
self.query_new = 0
self.invalid_init_state = 0
self.agent_cant_execute = 0
self.pal_tuple_dict = {}
self.timestr = time.strftime("%Y%m%d-%H%M%S")
self.init_result_file()
self.action_count = 0
self.predicate_count = 0
self.mod_pred_count = 0
self.start_time = time.time()
self.data_dict = OrderedDict()
self.random_state_file = RANDOM_STATE_FOLDER + "random_" + domain_name + ".pkl"
self.pal_tuple_order = None
self.ignore_PALs = set()
self.jumped_queue = set()
# self.initial_state_to_action_dict = None
self.data = None
self.tried_cont = 0
# self.query_1_failed = 0
self.discarded_count = 0
self.new_discard_count = 0
def init_result_file(self):
        with open(final_result_dir + self.domain_name + "-" + final_result_prefix + "-" + self.timestr + ".csv",
                  "w") as f:
            f.write("domain_name, #action, #predicate, #modified_predicates, #PALtuples_fixed_num, #queries_total, "
                    "#agent_failed, #repeated_queries, #unique_queries, #model_similarity, #time_elapsed, pal_tuple\n")
def fix_pal_tuple(self, pal_tuple, valid_models):
valid_models = [i for i in valid_models if not i.discarded]
assert (len(valid_models) <= 3)
print("Valid Model Len: ", len(valid_models))
print("Fixed pal tuple: ", pal_tuple)
mod_sim = 0
if not self.pal_tuple_dict[pal_tuple]:
self.pal_tuple_dict[pal_tuple] = True
self.PALtuples_fixed_num += 1
all_diff = []
for m in valid_models:
model_diff = get_model_difference(self.agent.agent_model, m, self.pal_tuple_dict)
print("Model Similarity: ", 1 - model_diff)
all_diff.append(model_diff)
mod_sim = 1 - max(all_diff)
        with open(final_result_dir + self.domain_name + "-" + final_result_prefix + "-" + self.timestr + ".csv",
                  "a") as f:
            f.write(",".join([self.domain_name, str(self.action_count), str(self.predicate_count),
                              str(self.mod_pred_count), str(self.PALtuples_fixed_num), str(self.agent_query_total),
                              str(self.agent_cant_execute), str(self.query_old), str(self.query_new), str(mod_sim),
                              str(time.time() - self.start_time), str(pal_tuple), "\n"]))
self.data_dict[self.PALtuples_fixed_num] = [self.query_new, mod_sim, time.time() - self.start_time]
return
def ask_query(self, init_state, plan, valid_models, partial_init_check=False):
if self.data:
self.data["query_info"].append((init_state, plan, valid_models))
query = dict()
query['init_state'] = copy.deepcopy(State(init_state, self.objects))
query['plan'] = copy.deepcopy(plan)
self.agent_query_total += 1
key = str("||".join(sorted(state_to_set(query['init_state'].state)))) + "|||" + str("||".join(query['plan']))
if key not in self.queries:
self.query_new += 1
is_executable_agent, failure_index, possible_state = self.agent.run_query(query, self.pal_tuple_dict,
partial_init_check)
            # Cache as a tuple so both return paths below yield the same shape.
            self.queries[key] = (is_executable_agent, failure_index, possible_state)
if failure_index == -1:
self.invalid_init_state += 1
if not is_executable_agent:
self.agent_cant_execute += 1
else:
self.query_old += 1
return self.queries[key]
return is_executable_agent, failure_index, possible_state
@staticmethod
    def reject_action_pred_combo(action, pred, rejected_literal, position, action_pred_comb_dict):
        """
        Record in action_pred_comb_dict that predicate `pred` of `action`
        cannot take the value rejected_literal at the given position
        (position 0 for precondition, 1 for effect).
        :return: the updated action_pred_comb_dict
        """
if action not in action_pred_comb_dict.keys():
action_pred_comb_dict[action] = {pred: [[rejected_literal, position]]}
else:
rejected_preds = action_pred_comb_dict[action]
if pred not in rejected_preds.keys():
                print("Error: predicate {} has no rejected entries for action {}; "
                      "invalid call".format(pred, action))
else:
rejected_pred_vals = rejected_preds[pred]
if [rejected_literal, position] not in rejected_pred_vals:
rejected_pred_vals.append([rejected_literal, position])
action_pred_comb_dict[action][pred] = rejected_pred_vals
else:
return action_pred_comb_dict
return action_pred_comb_dict
@staticmethod
    def is_model_rejectable(model, action_pred_comb_dict):
        """
        :param model: candidate model to test
        :param action_pred_comb_dict: rejected (literal, position) values per
            action and predicate
        :return: True if the model matches a rejected combination
        """
for action in action_pred_comb_dict.keys():
for pred, val in action_pred_comb_dict[action].items():
for v in val:
position = v[1]
literal = v[0]
try:
if model.actions[action][pred][position] == literal:
return True
else:
return False
except IndexError:
return False
    def propagate_refinement_in_models(self, valid_models, issue, old_refinement, location=Location.PRECOND):
        """
        :param valid_models: current list of candidate models
        :param issue: (action, predicate, mode) refinement to propagate
        :param old_refinement: earlier (action, predicate) refinement to retract
        :param location: Location.PRECOND or Location.EFFECTS
        :return: the updated list of models, with conflicting ones discarded
        """
action = issue[0]
pred = issue[1]
mode = issue[2]
valid_models = [i for i in valid_models if not i.discarded]
for m in valid_models:
if old_refinement[1] in (m.actions[old_refinement[0]]).keys():
if m.actions[old_refinement[0]][old_refinement[1]] == [Literal.ABS, Literal.ABS] and \
not self.pal_tuple_dict[(old_refinement[0], old_refinement[1], Location.PRECOND)] and \
not self.pal_tuple_dict[(old_refinement[0], old_refinement[1], Location.EFFECTS)]:
m.actions[old_refinement[0]].pop(old_refinement[1], None)
valid_models = list(set(valid_models))
for m in valid_models:
if pred not in m.predicates.keys():
m.predicates[pred] = 0
if pred in m.actions[action]:
if m.actions[action][pred][location - 1] != mode and \
m.actions[action][pred][location - 1] != Literal.ABS:
m.discarded = True
else:
m.actions[action][pred][location - 1] = mode
else:
if location == Location.PRECOND:
m.actions[action][pred] = [mode, Literal.ABS]
elif location == Location.EFFECTS:
m.actions[action][pred] = [Literal.ABS, mode]
return valid_models
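    # Unrefined tuples are served in four buckets, in order: easier preconditions,
    # difficult preconditions, easier effects, difficult effects.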
def get_next_pal_tuple(self, action="", predicate="", location=0):
"""
:param action:
:param predicate:
:param location:
:return:
"""
pre_easier_pal_tuples = list()
pre_difficult_pal_tuples = list()
eff_easier_pal_tuples = list()
eff_difficult_pal_tuples = list()
for key, val in self.pal_tuple_dict.items():
# Match action, predicate and refinement passed into the parameters and return if
# refinement not already done for those params
            if not val and (action == "" or action == key[0]) and \
                    (predicate == "" or predicate == key[1]) and \
                    (location == 0 or location == key[2]) and key not in self.ignore_PALs:
if key[2]==Location.PRECOND:
if key not in self.difficult_pal_tuple:
pre_easier_pal_tuples.append(key)
else:
pre_difficult_pal_tuples.append(key)
else:
if key not in self.difficult_pal_tuple:
eff_easier_pal_tuples.append(key)
else:
eff_difficult_pal_tuples.append(key)
if len(pre_easier_pal_tuples)>0:
return pre_easier_pal_tuples[0]
elif len(pre_difficult_pal_tuples)>0:
return pre_difficult_pal_tuples[0]
elif len(eff_easier_pal_tuples)>0:
return eff_easier_pal_tuples[0]
elif len(eff_difficult_pal_tuples)>0:
return eff_difficult_pal_tuples[0]
else:
return None
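    # Initial-state enumeration is combinatorial in the number of groundings, so
    # generation is capped at roughly 500 states to keep querying tractable.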
def get_possible_init_states(self, init_state):
"""
:param init_state:
:return:
"""
def remove_duplicates(ilist):
temp_list=[]
for _l in ilist:
if len(set(_l)) == len(_l):
temp_list.append(_l)
return temp_list
def powerset(iterable):
"""powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"""
s = list(iterable)
return itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(len(s) + 1))
new_init_states = [{}]
state_objects = []
for pred, _type in self.pred_type_mapping.items():
if _type == [] and pred not in init_state.keys():
init_state[pred] = [()]
_init_state_count = 0
# order = ["lift_at","above","origin","boarded","not_boarded","destin","served","not_served"]
# for pred in order:
for pred, _type in self.pred_type_mapping.items():
_type = self.pred_type_mapping[pred]
new_set = []
if not _type:
new_set = [()]
for t in _type:
new_set.append(self.objects[t])
new_list = list(itertools.product(*new_set))
new_list = remove_duplicates(new_list)
pow_set = list(powerset(new_list))
temp_init_states = copy.deepcopy(new_init_states)
for t in temp_init_states:
for p in pow_set:
if p != () or not _type:
new_temp_state_val = copy.deepcopy(t)
if _type:
new_temp_state_val[pred] = list(p)
else:
new_temp_state_val[pred] = [()]
init_state_obj = State(new_temp_state_val, self.objects)
state_objects.append(init_state_obj)
new_init_states.append(new_temp_state_val)
_init_state_count += 1
if _init_state_count > 500:
for _t in temp_init_states:
p = pow_set[-1]
if p != () or not _type:
new_temp_state_val = copy.deepcopy(_t)
if _type:
new_temp_state_val[pred] = list(p)
else:
new_temp_state_val[pred] = [()]
init_state_obj = State(new_temp_state_val, self.objects)
state_objects.append(init_state_obj)
new_init_states.append(new_temp_state_val)
_init_state_count += 1
break
if _init_state_count > 500:
break
return state_objects
def populate_action_pred_list(self, latt):
"""
This method populates the action_pred_list.
action_preds_list is used to store the modified predicates ("ontable|0", etc) corresponding to each action
For eg: actions_pred_list = {'pick-up': ['ontable|0', 'clear|0', 'handempty'],
'put-down': ['ontable|0', 'clear|0', 'handempty']}
:param latt:
:return:
"""
action_pred_list = {}
for action in self.abstract_model.actions.keys():
for predicate in self.pred_type_mapping.keys():
if action in action_pred_list.keys():
old_preds = action_pred_list[action]
if len(old_preds) > 0:
temp_preds = latt.generate_preds_for_action(predicate, action, self.pred_type_mapping,
self.action_parameters)
if temp_preds is None:
continue
action_pred_list[action].extend(temp_preds)
else:
temp_preds = latt.generate_preds_for_action(predicate, action, self.pred_type_mapping,
self.action_parameters)
if temp_preds is not None:
action_pred_list[action] = temp_preds
return action_pred_list
def generate_query(self, po_query_module, model_1, model_2, init_state, next_pal_tuple):
"""
:param po_query_module:
:param model_1:
:param model_2:
:param init_state:
:param next_pal_tuple:
:return:
"""
po_query = po_query_module.Query(model_1, model_2, init_state, next_pal_tuple, self.pal_tuple_dict)
plan_raw = po_query.get_plan_from_query(init_state, self.domain_name,
self.objects, self.pred_type_mapping,
self.action_parameters)
return plan_raw
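    # Both models simulate the same plan; a model is discarded either when it
    # cannot execute the plan (precondition mismatch) or when its final state
    # disagrees with the agent's observed state (effect mismatch).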
def discard_models(self, m1, m2, init_state, plan_raw, next_pal_tuple, valid_models,
action, action_pred, action_pred_rejection_combination, state, predicate, modes,
po_query_module, ref):
po_query = po_query_module.Query(m1, m2, init_state, next_pal_tuple, self.pal_tuple_dict)
plan_m1 = ExecutePlan(m1, po_query.init_state, plan_raw)
is_executable_m1, state_m1, failure_index_1 = plan_m1.execute_plan(self.pal_tuple_dict)
plan_m2 = ExecutePlan(m2, po_query.init_state, plan_raw)
is_executable_m2, state_m2, failure_index_2 = plan_m2.execute_plan(self.pal_tuple_dict)
is_any_model_discarded = False
if (not is_executable_m1 or not is_executable_m2) and next_pal_tuple[2] == Location.PRECOND:
if not is_executable_m1:
self.discarded_count += 1
self.discard_model(m1, valid_models)
is_any_model_discarded = True
rejected_literal = m1.actions[action][action_pred][int(ref) - 1]
modes.remove(rejected_literal)
self.reject_action_pred_combo(action, action_pred, rejected_literal, int(ref) - 1,
action_pred_rejection_combination)
# {'pickupb1': {'on-floor-bottle1': [<Literal.ABS: 0>, <Literal.NEG: -1>]}}
# {'pickupb1': {'on-floor-bottle1': [<Literal.NEG: -1>, <Literal.NEG: -1>]}}
# M2 can't run the plan so returns None, but agent can run
# If concretized version (agent) can run, then abstracted version should definitely run,
# reverse need not be true
if not is_executable_m2:
self.discarded_count += 1
self.discard_model(m2, valid_models)
is_any_model_discarded = True
rejected_literal = m2.actions[action][action_pred][int(ref) - 1]
modes.remove(rejected_literal)
self.reject_action_pred_combo(action, action_pred, rejected_literal,
int(ref) - 1,
action_pred_rejection_combination)
if not m1.discarded and not m2.discarded and \
next_pal_tuple[2] == Location.EFFECTS:
original_predicate = predicate.split("|")[0]
final_action = plan_raw[-1].split("|")[1:]
for z in predicate.split("|")[1:]:
original_predicate += "|" + final_action[int(z)]
"""
if pi(M_A) -> p1 \\in S_F(M_A)
\foll M_i if pi(M_i) -> ~p1 \\in S_F(M_i) REJECT
else POSSIBLE, CAN'T REJECT
Why NOT ACCEPTING, a1, a2 in pi
if in M_A a1 making p1 true, but in M_i a2 making p1 true
then M_i not equal to M_A but we can't know.
We can ACCEPT only if PLAN LEN = 1
"""
if (original_predicate in state and original_predicate not in state_m2) or \
(original_predicate not in state and original_predicate in state_m2):
self.discarded_count += 1
self.discard_model(m2, valid_models)
is_any_model_discarded = True
rejected_literal = m2.actions[action][action_pred][int(ref) - 1]
self.reject_action_pred_combo(action, action_pred, rejected_literal,
int(ref) - 1,
action_pred_rejection_combination)
if (original_predicate in state and original_predicate not in state_m1) or \
(original_predicate not in state and original_predicate in state_m1):
self.discarded_count += 1
self.discard_model(m1, valid_models)
is_any_model_discarded = True
rejected_literal = m1.actions[action][action_pred][int(ref) - 1]
self.reject_action_pred_combo(action, action_pred, rejected_literal,
int(ref) - 1,
action_pred_rejection_combination)
return self.discarded_count, is_any_model_discarded, valid_models
def is_action_pred_compatible(self, action, pred):
possible = True
for p in self.pred_type_mapping[pred]:
if p not in self.action_parameters[action]:
possible = False
return possible
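    # Uses the FF planner to enumerate groundings of the given action, then VAL
    # to validate each candidate single-action plan against the query files.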
def get_additional_plans(self, action, model1, model2, next_pal_tuple):
action_name = action.split("|")[0]
param = FF_PATH + "ff"
param += " -o " + Q_DOMAIN_FILE
param += " -f " + Q_PROBLEM_FILE
param += " -i 120 | grep -i \"Action " + action_name + "\" |"
param += " sed 's/Action //'"
param += " > " + ALL_ACTION_FILE
p = subprocess.Popen([param], shell=True)
p.wait()
with open(ALL_ACTION_FILE) as f:
possible_actions = f.read().splitlines()
f = open(Q_DOMAIN_FILE, "w")
FileUtils.write_domain_to_file(f, self.domain_name, self.objects, self.pred_type_mapping,
self.action_parameters, model1, model2, self.pal_tuple_dict, next_pal_tuple)
f.close()
valid_plans = []
t_act = " ".join(action.lower().split("|"))
for _act in possible_actions:
if _act.lower() == t_act:
continue
f = open(temp_plan_file, "w")
f.write("0.00000: (" + _act + ")\n")
f.close()
param = VAL_PATH + " -v"
param += " " + Q_DOMAIN_FILE
param += " " + Q_PROBLEM_FILE
param += " " + temp_plan_file
param += " | grep -i \"successfully\""
param += " > " + Q_RESULT_FILE
p = subprocess.Popen([param], shell=True)
p.wait()
if "successfully" in open(Q_RESULT_FILE).read():
_temp_val = _act.lower().split(" ")
valid_plans.append(["|".join(_temp_val)])
return valid_plans
def did_jump_queue(self, current_plan, actual_pal_tuple):
current_action = current_plan[0].split("|")[0]
if actual_pal_tuple[0] != current_action:
if current_action not in self.jumped_queue:
return True
else:
self.jumped_queue.add(current_action)
return False
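    # Recovery path for a failing query: search for a state in which the plan's
    # first action is executable, minimize that state to isolate preconditions,
    # and read effects off the agent's resulting state.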
def update_pal_ordering(self, init_state, failure_index, plan_raw, valid_models,
next_pal_tuple, model1, model2, po_query_module, lattice_node):
full_states_used = False
refined_for_agent_failure = False
if failure_index > 0:
temp_plan_raw = copy.deepcopy(plan_raw[failure_index:])
plan = ExecutePlan(model1, init_state, plan_raw[:failure_index])
is_executable_agent, state_abstracted, fail_index = plan.execute_plan(self.pal_tuple_dict)
assert (is_executable_agent and failure_index == fail_index)
# Assertion failure? Then how was query generated
else:
temp_plan_raw = copy.deepcopy(plan_raw)
found_state = False
possible_state = None
state_full = self.get_full_state(temp_plan_raw[0], lattice_node)
# State with all the predicates
is_executable_agent, failure_index, state = self.ask_query(set_to_state(state_full), temp_plan_raw[0:1], valid_models)
if is_executable_agent:
found_state = True
if self.did_jump_queue(temp_plan_raw, next_pal_tuple) and len(temp_plan_raw)>1:
refined_for_agent_failure, valid_models = self.update_pal_ordering(state, 0, temp_plan_raw[1:], valid_models,
next_pal_tuple, model1, model2, po_query_module, lattice_node)
self.ignore_PALs.add(next_pal_tuple)
temp_plan_raw = temp_plan_raw[0:1]
possible_state = copy.deepcopy(set_to_state(state_full))
full_states_used = True
else:
print("FAILURE")
start = 0
sys.path.append('../../src/')
with open(self.random_state_file, 'rb') as f:
random_states = pickle.load(f)
i = 0
_curr_state_set = copy.deepcopy(state_full)
for _state in state_full:
_curr_state_set.remove(_state)
_curr_state = set_to_state(_curr_state_set)
is_executable_agent, failure_index, state = self.ask_query(_curr_state, temp_plan_raw[0:1], valid_models)
if is_executable_agent:
possible_state = _curr_state
found_state = True
if self.did_jump_queue(temp_plan_raw, next_pal_tuple) and len(temp_plan_raw) > 1:
refined_for_agent_failure, valid_models = self.update_pal_ordering(state, 0,
temp_plan_raw[1:],
valid_models,
next_pal_tuple, model1,
model2, po_query_module,
lattice_node)
temp_plan_raw = temp_plan_raw[0:1]
break
else:
_curr_state_set.append(_state)
if not found_state:
# Getting state from random states start
for _state in random_states:
i += 1
self.objects = _state.objects
_curr_state = _state.state
_temp_plan_raw = self.generate_query(po_query_module, model1, model2, _curr_state,
next_pal_tuple)
if len(_temp_plan_raw) != 0:
# Last action of generated plan should be distinguishing
curr_action = temp_plan_raw[0].split("|")[0]
_i = 0
for _i in range(len(_temp_plan_raw)):
if _temp_plan_raw[_i].split("|")[0] == curr_action:
break
if _i == len(_temp_plan_raw):
continue
if _i > 0:
is_executable_agent, failure_index, state = self.ask_query(_state.state,
_temp_plan_raw[0:_i], valid_models)
if is_executable_agent:
_curr_state = State(set_to_state(_curr_state), _state.objects)
start = _i
else:
continue
else:
continue
is_executable_agent, failure_index, state = self.ask_query(_curr_state,
_temp_plan_raw[start:start + 1], valid_models)
if is_executable_agent:
found_state = True
possible_state = _curr_state
temp_plan_raw = copy.deepcopy(_temp_plan_raw[start:start + 1])
break
else:
print("Failed: ", i)
if not found_state:
print("Here after failing")
self.failed_plans.append(temp_plan_raw)
return False, valid_models
if found_state:
# Now check if any predicate found in possible state is redundant?
# Remove one of them at a time and see if plan still executes successfully
abs_preds_precond = []
current_state = None
neg_case = None
for i in range(0, len(temp_plan_raw) + 1):
if i > 0:
# This is a golden chance to get the possible effects
# We have already found the minimal state that needs to be true
# to execute the action temp_plan_raw[i-1], now whatever effect we get is minimal effect
is_executable_agent, failure_index, possible_state = self.ask_query(current_state, temp_plan_raw[i - 1:i], valid_models, True)
try:
assert (is_executable_agent and failure_index == 1)
except AssertionError as e:
# We cannot make calls on effects as agent didn't run the plan.
print("Assertion Error: ", e)
is_executable_agent, failure_index, possible_state = self.ask_query(current_state,
temp_plan_raw[i - 1:i],
valid_models, True)
break
poss_state_set = possible_state
curr_state_set = state_to_set(current_state)
add_effects = poss_state_set - curr_state_set
del_effects = curr_state_set - poss_state_set
abs_effects = curr_state_set & poss_state_set
doubtful_preds = set(state_full) - curr_state_set
neg_precond = set()
temp_act = temp_plan_raw[i - 1].split("|")[0]
instantiated_pred_doubtful = set()
for d_pred in doubtful_preds:
if not (
self.is_action_pred_compatible(temp_plan_raw[i - 1].split("|")[0],
d_pred.split("|")[0])):
continue
action_name, instantiated_pred = map_pred_action_param(d_pred, temp_plan_raw[i - 1])
if instantiated_pred is not None and action_name is not None:
instantiated_pred_doubtful.add(instantiated_pred)
if not full_states_used:
instantiated_pred_doubtful = instantiated_pred_doubtful | abs_preds_precond
for d_pred in instantiated_pred_doubtful:
if self.pal_tuple_dict[(temp_act, d_pred, Location.PRECOND)]:
continue
d_pred = instantiate_pred_with_action(d_pred, temp_plan_raw[i - 1])
if d_pred is None:
continue
_new_state_set = {d_pred} | curr_state_set
_new_state = set_to_state(_new_state_set)
is_executable_agent, failure_index, possible_state = self.ask_query(_new_state,
temp_plan_raw[i - 1:i], valid_models)
if not is_executable_agent:
action_name, instantiated_pred = map_pred_action_param(d_pred, temp_plan_raw[i - 1])
if failure_index != -1:
if instantiated_pred in abs_preds_precond:
abs_preds_precond.remove(instantiated_pred)
neg_precond.add(instantiated_pred)
elif d_pred not in possible_state:
if d_pred in abs_effects:
abs_effects.remove(d_pred)
del_effects.add(d_pred)
for preds in abs_preds_precond:
if not self.pal_tuple_dict[(temp_act, preds, Location.PRECOND)]:
valid_models = self.propagate_refinement_in_models(valid_models,
[temp_act, preds, Literal.ABS],
next_pal_tuple, Location.PRECOND)
self.fix_pal_tuple((temp_act, preds, Location.PRECOND), valid_models)
for preds in neg_precond:
if not self.pal_tuple_dict[(temp_act, preds, Location.PRECOND)]:
valid_models = self.propagate_refinement_in_models(valid_models,
[temp_act, preds, Literal.NEG],
next_pal_tuple, Location.PRECOND)
self.fix_pal_tuple((temp_act, preds, Location.PRECOND), valid_models)
for e in add_effects:
action_name, instantiated_pred = map_pred_action_param(e, temp_plan_raw[i - 1])
if action_name is None or \
(action_name, instantiated_pred, Location.EFFECTS) not in self.pal_tuple_dict.keys() or\
self.pal_tuple_dict[(action_name, instantiated_pred, Location.EFFECTS)]:
continue
valid_models = self.propagate_refinement_in_models(valid_models,
[action_name, instantiated_pred,
Literal.POS], next_pal_tuple,
Location.EFFECTS)
self.fix_pal_tuple((action_name, instantiated_pred, Location.EFFECTS), valid_models)
for e in del_effects:
action_name, instantiated_pred = map_pred_action_param(e, temp_plan_raw[i - 1])
if action_name is None or \
(action_name, instantiated_pred, Location.EFFECTS) not in self.pal_tuple_dict.keys() or\
self.pal_tuple_dict[(action_name, instantiated_pred, Location.EFFECTS)]:
continue
valid_models = self.propagate_refinement_in_models(valid_models,
[action_name, instantiated_pred,
Literal.NEG], next_pal_tuple,
Location.EFFECTS)
self.fix_pal_tuple((action_name, instantiated_pred, Location.EFFECTS), valid_models)
# Since we are not using all possible predicates in init state, any inferences about effect being
# absent is incorrect.
for e in abs_effects:
action_name, instantiated_pred = map_pred_action_param(e, temp_plan_raw[i - 1])
if action_name is None or \
(action_name, instantiated_pred, Location.EFFECTS) not in self.pal_tuple_dict.keys() or\
self.pal_tuple_dict[(action_name, instantiated_pred, Location.EFFECTS)]:
continue
valid_models = self.propagate_refinement_in_models(valid_models,
[action_name, instantiated_pred,
Literal.ABS], next_pal_tuple,
Location.EFFECTS)
self.fix_pal_tuple((action_name, instantiated_pred, Location.EFFECTS), valid_models)
# ASSUMING AGENT CAN EXECUTE TEMP_PLAN_RAW[i]
temp_act = temp_plan_raw[i - 1].split("|")[0]
all_preds_abs = [tup[1] for tup in self.pal_tuple_dict.keys() if
tup[0] == temp_act and
tup[2] == Location.EFFECTS and
not self.pal_tuple_dict[tup]]
for preds in all_preds_abs:
if not self.pal_tuple_dict[(temp_act, preds, Location.EFFECTS)]:
valid_models = self.propagate_refinement_in_models(valid_models,
[temp_act, preds, Literal.ABS],
next_pal_tuple, Location.EFFECTS)
self.fix_pal_tuple((temp_act, preds, Location.EFFECTS), valid_models)
if i == len(temp_plan_raw):
continue
possible_state = set_to_state(curr_state_set)
current_state = copy.deepcopy(possible_state)
_full_state = copy.deepcopy(possible_state)
new_preds_temp = []
for key, val in _full_state.items():
if not (self.is_action_pred_compatible(temp_plan_raw[i].split("|")[0], key)):
continue
for v in val:
temp_init = copy.deepcopy(current_state)
if len(val) == 1:
del temp_init[key]
assert (key not in list(temp_init.keys()))
else:
temp_init[key].remove(v)
is_executable_agent, failure_index, possible_state = self.ask_query(temp_init,
temp_plan_raw[i:], valid_models, True)
# For plans of length more than 1, it is possible that agent failed for 2nd or later action
# in the plan, so even if is_executable_agent false, check if failure_index = 1 or later
if is_executable_agent or failure_index >= 1:
initial_val_len = len(current_state[key])
current_state[key].remove(v)
if initial_val_len > 0 and len(current_state[key]) == 0:
del current_state[key]
else:
if isinstance(v, (list, tuple)) and len(v) > 1:
final_val = key
for ind in range(0, len(v)):
final_val += "|" + v[ind]
predicate_temp = [final_val]
else:
predicate_temp = list(state_to_set({key: tuple(v, )}))
action = temp_plan_raw[i]
action_name, instantiated_pred = map_pred_action_param(predicate_temp[0], action)
if action_name is None:
continue
new_preds_temp.append(instantiated_pred)
if self.pal_tuple_dict[(action_name, instantiated_pred, Location.PRECOND)]:
continue
valid_models = self.propagate_refinement_in_models(valid_models,
[action_name, instantiated_pred,
Literal.POS], next_pal_tuple,
Location.PRECOND)
self.fix_pal_tuple((action_name, instantiated_pred, Location.PRECOND), valid_models)
# ASSUMING AGENT CAN EXECUTE TEMP_PLAN_RAW[i]
temp_act = temp_plan_raw[i].split("|")[0]
all_preds_poss = [tup[1] for tup in self.pal_tuple_dict.keys() if
tup[0] == temp_act and tup[2] == Location.PRECOND]
abs_preds_precond = set(all_preds_poss) - set(new_preds_temp)
neg_case = False
if not neg_case:
# agent fails for current PAL but updates PAL ordering and refines some other PAL, then refined_for_agent_failure is set to True
refined_for_agent_failure = True
return refined_for_agent_failure, valid_models
@staticmethod
def discard_model(m, valid_models):
m.discarded = True
for tm in valid_models:
if tm == m:
tm.discarded = True
def initialize_pal_tuple_dict(self, lattice_node):
# To keep track which of the action_predicate_refinement is done
# Dict{} with keys (action, predicate, refinement)
# Values are Boolean
# pal_tuple_dict = {}
replaced_actions = []
for action in self.abstract_model.actions.keys():
for predicate in self.pred_type_mapping.keys():
# Generate the predicates with action parameter index inbuilt into them
# ontable takes one argument, if action a has 2 possible locations of that argument type, say 0 and 2
# so it'll get converted to ontable|0 and ontable|2 when called with action a
temp_preds = lattice_node.generate_preds_for_action(predicate, action, self.pred_type_mapping,
self.action_parameters)
if temp_preds is not None:
for i in range(2):
for p in temp_preds:
# Use tuples as key
key = (action, p, Location(i + 1))
p_name = p.lower().split("|")[0]
# if self.agent.agent_type == "simulator" and p_name in replaced_actions:
# self.pal_tuple_dict[key] = True
# # We can do this at the end too if p_name == action.replace('-', '') and p_params ==
# # sorted(p_params) and p_params[0] == "0": self.abstract_model.actions[action][p] =
# # [Literal.POS, Literal.ABS] else: self.abstract_model.actions[action][p] = [Literal.ABS,
# # Literal.ABS]
# else:
self.pal_tuple_dict[key] = False
if self.data and self.data["PALtuples_fixed"]:
for tup in self.data["PALtuples_fixed"]:
self.pal_tuple_dict[tup] = True
if self.data and self.data["PALtuples_dropped"]:
for tup in self.data["PALtuples_dropped"]:
self.pal_tuple_dict[tup] = False
if self.data and self.data["PALtuples_dropped_no_obs"]:
for tup in self.data["PALtuples_dropped_no_obs"]:
self.pal_tuple_dict[tup] = False
return
def get_full_state(self, action, lattice_node):
action_name = action.split("|")[0]
action_params = action.split("|")[1:]
full_state = []
for predicate in self.pred_type_mapping.keys():
# Generate the predicates with action parameter index inbuilt into them
# ontable takes one argument, if action a has 2 possible locations of that argument type, say 0 and 2
# so it'll get converted to ontable|0 and ontable|2 when called with action a
temp_preds = lattice_node.generate_preds_for_action(predicate, action_name, self.pred_type_mapping,
self.action_parameters)
if temp_preds is not None:
for temp_pred in temp_preds:
pred = temp_pred.split("|")[0]
pred_params = temp_pred.split("|")[1:]
for p in pred_params:
pred += "|" + action_params[int(p)]
full_state.append(pred)
return full_state
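    # Prunes candidate initial states using precondition literals on which the
    # two models agree, so queries start from states where the action is
    # plausibly executable.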
@staticmethod
def get_modified_init_states(next_pal_tuple, m1, m2, possible_init_states):
action_pred = next_pal_tuple[1]
action = next_pal_tuple[0]
# loc = next_pal_tuple[2]
# pred_name = action_pred.split("|")[0]
modified_init_states = []
for p, val in m1.actions[action].items():
pos = False
neg = False
pred = p.split("|")[0] + "|"
count = 0
for preds in m1.actions[action].keys():
if pred in preds:
count += 1
if count > 1:
continue
if p != action_pred and val == m2.actions[action][p]:
if val[0] == Literal.POS:
pos = True
else:
neg = True
elif p == action_pred and val != m2.actions[action][p]:
if {Literal.POS, Literal.NEG} == {val[0], m2.actions[action][p][0]}:
continue
elif {Literal.POS, Literal.ABS} == {val[0], m2.actions[action][p][0]}:
neg = True
elif {Literal.ABS, Literal.NEG} == {val[0], m2.actions[action][p][0]}:
pos = True
if len(modified_init_states) == 0:
temp_init_states = copy.deepcopy(possible_init_states)
else:
temp_init_states = copy.deepcopy(modified_init_states)
modified_init_states = []
if pos:
for s in temp_init_states:
if p.split("|")[0] in s.state.keys():
modified_init_states.append(s)
elif neg:
for s in temp_init_states:
if p.split("|")[0] not in s.state.keys():
modified_init_states.append(s)
if len(modified_init_states) == 0:
modified_init_states = copy.deepcopy(possible_init_states)
return copy.deepcopy(modified_init_states)
def get_objects(self):
# This logic limits number of objects and counts number of objects
# Hypothesis: Number of objects needed = max arity of that object type in any action
object_count = 0
max_obj_type_count = {}
for obj_types, obj_cont in self.objects.items():
object_count += len(obj_cont)
max_obj_type_count[obj_types] = 0
for v in self.action_parameters.values():
type_count = Counter(v)
for tc in type_count:
max_obj_type_count[tc] = max(max_obj_type_count[tc], type_count[tc])
temp_objects = {}
for o, item in self.objects.items():
if len(item) > max_obj_type_count[o] + 1:
temp_key = copy.deepcopy(item[0:max_obj_type_count[o]])
else:
temp_key = copy.deepcopy(item)
temp_objects[o] = temp_key
return temp_objects, object_count
def convert_state_to_pred_args_repr(self, state):
"""
transform fama state to a map of predicate to argument list e.g. {'above' : {('f0','f1')}} as required by AIA
"""
state_predicates_to_params = dict()
for literal in state.literals:
if literal.name not in state_predicates_to_params.keys():
state_predicates_to_params[literal.name] = set()
state_predicates_to_params[literal.name].add(tuple(literal.args))
for key in state_predicates_to_params.keys():
state_predicates_to_params[key] = list(state_predicates_to_params[key])
return state_predicates_to_params
def filter_models_for_every_combination(self, valid_models, action_pred_rejection_combination, possible_state_objects, \
next_pal_tuple, action, action_pred, predicate, ref, po_query_module, lattice_node, agent_exec, refined_for_agent_failure):
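        # For every pair of surviving models, generate a distinguishing query and
        # run it on the agent; either refine the PAL ordering (agent failure) or
        # discard whichever model disagrees with the agent's observed outcome.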
modes = [Literal.POS, Literal.NEG, Literal.ABS]
# Generate all possible combinations of models
for m1, m2 in combinations(valid_models, 2):
if m1.discarded or m2.discarded:
continue
if m1.actions == m2.actions:
self.new_discard_count += 1
self.discard_model(m1, valid_models)
continue
if len(action_pred_rejection_combination) > 0:
if self.is_model_rejectable(m1, action_pred_rejection_combination):
self.new_discard_count += 1
self.discard_model(m1, valid_models)
if self.is_model_rejectable(m2, action_pred_rejection_combination):
self.new_discard_count += 1
self.discard_model(m2, valid_models)
self.tried_cont += 1
init_state_tried = 0
is_any_model_discarded = False
if ref == Location.PRECOND:
modified_state_objects = self.get_modified_init_states(next_pal_tuple, m1, m2,
possible_state_objects)
else:
modified_state_objects = copy.deepcopy(possible_state_objects)
for state_objs in modified_state_objects:
init_state = state_objs.state
self.objects = state_objs.objects
# New logic in update_pal_ordering to add pals to ignore_PALs.
update_pal_ordering_called = False
init_state_tried += 1
# preds_in_m1_m2 = list(set(list(m1.predicates.keys())) & set(list(m2.predicates.keys())))
# preds_in_m1_m2 = [i for j in preds_in_m1_m2 for i in j.split("|")]
# if list(set(preds_in_m1_m2) & set(list(init_state.keys()))) == []:
# continue
# if action_to_initial_states_dict and len(initial_state_to_action_dict)>0:
# plan_raw = [initial_state_to_action_dict[state_objs]]
# else:
# if next_pal_tuple[0]=="up" or next_pal_tuple[0]=="board" and next_pal_tuple[2]==Location.PRECOND:
# print("here")
plan_raw = self.generate_query(po_query_module, m1, m2, init_state, next_pal_tuple)
if len(plan_raw) != 0:
is_executable_agent, failure_index, state = self.ask_query(init_state, plan_raw, valid_models)
agent_exec += 1
if failure_index != len(plan_raw) or not is_executable_agent:
update_pal_ordering_called = True
refined_for_agent_failure, valid_models = self.update_pal_ordering(init_state,
failure_index,
plan_raw,
valid_models,
next_pal_tuple, m1,
m2, po_query_module,
lattice_node)
break
elif failure_index == len(plan_raw) and is_executable_agent:
self.discarded_count, is_any_model_discarded, \
valid_models = self.discard_models(m1, m2, init_state, plan_raw,
next_pal_tuple, valid_models, action, action_pred,
action_pred_rejection_combination, state, predicate,
modes, po_query_module, ref)
else:
exit()
if is_any_model_discarded or refined_for_agent_failure:
break
if not update_pal_ordering_called:
self.ignore_PALs.add(next_pal_tuple)
if refined_for_agent_failure:
break
return valid_models
def get_action_params(self, action):
"""
return action name and params separated by "|" e.g. board|f1|p0
"""
action_params = action.name
for param in action.parameters:
action_params += "|"+param.name
return action_params
def get_predicate_params(self, action, predicate_ids):
"""
using params from action, return predicate in terms of params
"""
ids_to_params = dict()
id_ = 0
for param in action.parameters:
ids_to_params[id_] = param.name
id_ += 1
predicate_name = predicate_ids.split("|")[0]
predicate_params = list()
for id_ in predicate_ids.split("|")[1:]:
predicate_params.append(ids_to_params[int(id_)])
return predicate_name, tuple(predicate_params)
def get_predicate_presence(self, state, predicate_name, predicate_params):
state_presence = "negative"
if predicate_name in state.keys():
if predicate_params in state[predicate_name]:
state_presence = "positive"
return state_presence
def get_init_state_flipped(self, init_state, predicate_name, predicate_params):
init_state_flipped = copy.deepcopy(init_state)
init_state_presence = "negative"
if predicate_name in init_state.keys():
if predicate_params in init_state[predicate_name]:
init_state_presence = "positive"
params_list = init_state_flipped[predicate_name]
params_list.remove(predicate_params)
init_state_flipped[predicate_name] = params_list
if init_state_presence=="negative":
if predicate_name not in init_state_flipped.keys():
init_state_flipped[predicate_name] = list()
params_list = init_state_flipped[predicate_name]
params_list.append(predicate_params)
init_state_flipped[predicate_name] = params_list
return init_state_flipped, init_state_presence
def filter_models_by_computing_ModeTuple_for_PA(self, valid_models, next_pal_tuple, agent_exec):
"""
looks up state, action executable in that state, and next state from observations and then queries for state with predicate flipped
"""
action_name = next_pal_tuple[0]
action = list(self.data["action_to_statepair_set"][action_name].keys())[0]
statepair = list(self.data["action_to_statepair_set"][action_name][action])[0]
init_state, next_state = self.convert_state_to_pred_args_repr(statepair[0]), self.convert_state_to_pred_args_repr(statepair[1])
valid_models = self.filter_models_when_state_executable_action_known(valid_models, next_pal_tuple, init_state, action, next_state, agent_exec)
return valid_models
def filter_models_when_state_executable_action_known(self, valid_models, next_pal_tuple, init_state, action, next_state, agent_exec):
"""
sets correct mode for a predicate in an action for both pre and eff (based on querying the agent)
when state where the action is executable is available
"""
action_name = next_pal_tuple[0]
predicate_ids = next_pal_tuple[1]
if next_pal_tuple[2]==Location.PRECOND:
# ask query
action_params = self.get_action_params(action)
predicate_name, predicate_params = self.get_predicate_params(action, predicate_ids)
init_state_flipped, init_state_presence = self.get_init_state_flipped(init_state, predicate_name, predicate_params)
is_executable_agent, failure_index, next_state_flipped = self.ask_query(init_state_flipped, [action_params], valid_models)
agent_exec += 1
if is_executable_agent:
pre = Literal.ABS
elif init_state_presence=="positive":
pre = Literal.POS
else:
pre = Literal.NEG
# compute valid eff
valid_mode_tuples = list()
if (action_name,predicate_ids) in self.data["PATuple_to_valid_ModeTuple_set_dict"].keys():
for mode_tuple in self.data["PATuple_to_valid_ModeTuple_set_dict"][(action_name,predicate_ids)]:
if mode_tuple[0]==pre:
valid_mode_tuples.append(mode_tuple)
if len(valid_mode_tuples)==1:
# can lookup for eff as pre is either POS or NEG, hence, single valid eff possible
eff = valid_mode_tuples[0][1]
else:
next_state_presence = self.get_predicate_presence(next_state, predicate_name, predicate_params)
# if is_executable_agent==False i.e. pre is POS or NEG
if pre==Literal.POS:
if next_state_presence=="positive":
eff = Literal.ABS
else:
eff = Literal.NEG
elif pre==Literal.NEG:
if next_state_presence=="positive":
eff = Literal.ABS
else:
eff = Literal.POS
else:
# if pre is ABS use query response to find eff
next_state_flipped = set_to_state(next_state_flipped)
next_state_flipped_presence = self.get_predicate_presence(next_state_flipped, predicate_name, predicate_params)
if next_state_presence==next_state_flipped_presence:
if init_state_presence=="positive":
eff = Literal.POS
else:
eff = Literal.NEG
else:
eff = Literal.ABS
set_valid_model = False
for valid_model in valid_models:
if set_valid_model:
valid_model.discarded = True
else:
                valid_model.actions[action_name][predicate_ids] = [pre, eff]
self.ignore_PALs.add(next_pal_tuple)
self.fix_pal_tuple(next_pal_tuple, valid_models)
next_pal_tuple_eff = tuple([next_pal_tuple[0], next_pal_tuple[1], Location.EFFECTS])
self.ignore_PALs.add(next_pal_tuple_eff)
self.fix_pal_tuple(next_pal_tuple_eff, valid_models)
set_valid_model = True
return valid_models
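    # Collects all precondition PAL tuples recorded for a single action.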
def get_pals_for_action(self, action_name):
pals = set()
for key, val in self.pal_tuple_dict.items():
if key[0]==action_name and key[2]==Location.PRECOND:
pals.add(key)
return pals
def query_for_actions_with_no_observations(self, valid_models, next_pal_tuple, agent_exec):
"""
For actions that are unavailable in the observations, check if they've been marked as 'changed' during the inference process
If marked 'changed', then query using states where the actions were previously executable by the init model
If the current agent executes the action the state, then it can be used to filter models
"""
valid_states_set = set()
for action_name, action_states_map in self.data["action_to_statepair_set_init"].items():
for action, states_tup_set in self.data["action_to_statepair_set_init"][action_name].items():
for states_tup in states_tup_set:
valid_states_set.add(states_tup[0])
valid_state_pred_arg_repr_set = list()
for state in valid_states_set:
valid_state_pred_arg_repr_set.append(self.convert_state_to_pred_args_repr(state))
# if len(self.data["actions_with_no_obs"]) > 0:
# for action_name in self.data["actions_with_no_obs"]:
for action_name in self.data["marked_changed_actions"]:
# action = list(self.data['action_to_statepair_set_init'][action_name].keys())[0]
# init_state = list(self.data['action_to_statepair_set_init'][action_name][action])[0][0]
# init_state = self.convert_state_to_pred_args_repr(init_state)
action = self.data["action_name_to_action"][action_name]
action_params = self.get_action_params(action)
for init_state in valid_state_pred_arg_repr_set:
is_executable_agent, failure_index, next_state = self.ask_query(init_state, [action_params], valid_models)
if is_executable_agent:
next_state = set_to_state(next_state)
pals = self.get_pals_for_action(action_name)
for pal in pals:
valid_models = self.filter_models_when_state_executable_action_known(valid_models, pal, init_state, action, next_state, agent_exec)
break
return valid_models
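    # Main AIA loop: repeatedly pick an unrefined PAL tuple, partition the
    # surviving models on its possible modes, and filter the partitions with
    # agent queries until the tuple's mode is fixed.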
def agent_interrogation_algo(self, data=None):
"""
:return: true for successful execution, false otherwise
:rtype: bool
"""
self.data = data
# Import modules for both kind of queries
# pr_query_module = importlib.import_module("query.pr_query")
po_query_module = importlib.import_module("src.query.po_query")
init_state = {}
# exit(0)
# Create list of predicates and abstracted predicates
abs_predicates = list(self.abstract_model.predicates.keys())
all_predicates = list(self.agent.agent_model.predicates.keys())
# Create a lattice object
latt = Lattice()
lattice_node = LatticeNode(latt, [self.abstract_model], self.abstract_predicates)
# object type to objects
self.objects, object_count = copy.deepcopy(self.get_objects())
# To keep track which of the action_predicate_refinement is done
# Dict{} with keys (action, predicate, refinement)
# Values are Boolean
self.initialize_pal_tuple_dict(lattice_node)
# To calculate actual number of predicates
pred_set = set()
for p in list(self.pal_tuple_dict.keys()):
pred_set.add(p[1])
modified_preds_count = len(pred_set)
self.action_count = len(self.abstract_model.actions.keys())
self.predicate_count = len(self.pred_type_mapping.keys())
self.mod_pred_count = copy.deepcopy(modified_preds_count)
#################################
# int_parent_models holds the possible models at any given point of time.
int_parent_models = [self.abstract_model]
# action_preds_list is used to store the modified predicates ("ontable|0", etc) corresponding to each action
# For eg: actions_pred_list = {'pick-up': ['ontable|0', 'clear|0', 'handempty'], \
# 'put-down': ['ontable|0', 'clear|0', 'handempty']}
action_preds_list = self.populate_action_pred_list(lattice_node)
lattice_node.action_pred_dict = copy.deepcopy(action_preds_list)
temp_init_state = copy.deepcopy(init_state)
original_abs_preds = copy.deepcopy(abs_predicates)
original_action_pred_list = copy.deepcopy(action_preds_list)
preprocess_start_time = time.time()
orig_possible_state_object_comb = self.get_possible_init_states(temp_init_state)
preprocess_time = time.time() - preprocess_start_time
agent_exec = 0
print("Actual Predicates = " + str(len(self.pred_type_mapping.keys())) + str("\n"))
print("Modified Predicates = " + str(modified_preds_count) + str("\n"))
print("Action Count = " + str(len(self.abstract_model.actions.keys())) + str("\n"))
print("Object Count = " + str(object_count) + str("\n"))
valid_models = None
while True:
model_level = 0
next_pal_tuple = self.get_next_pal_tuple()
refined_for_agent_failure = False
if next_pal_tuple is None:
# All possible refinements over
# int_parent_models should be holding possible models at most concretized level
# return False, False, None, None, None, None, None, None
break
# Try to generate predicates that are part of init_state
init_state = copy.deepcopy(temp_init_state)
if len(abs_predicates) < len(init_state):
predicate = get_next_predicate(init_state, abs_predicates)
else:
predicate = get_next_predicate(all_predicates, abs_predicates)
# All predicates done!!
if predicate is None:
next_r = self.get_next_pal_tuple()
if next_r is not None:
abs_predicates = copy.deepcopy(original_abs_preds)
action_preds_list = copy.deepcopy(original_action_pred_list)
else:
exit(0)
return False, None
continue
# Pick something in init state.
# Hack to get plans that distinguish at more abstract levels
original_pred = predicate
pred_valid = False
temp_action_preds_list = copy.deepcopy(action_preds_list)
for action in action_preds_list:
for pred in action_preds_list[action]:
if predicate in pred or predicate == pred:
pred_valid = True
predicate = pred
action_preds_list[action].remove(pred)
if not pred_valid:
abs_predicates.append(original_pred)
continue
next_pal_tuple = self.get_next_pal_tuple()
predicate = next_pal_tuple[1]
print("\nACTUAL NEXT PAL TUPLE: ", next_pal_tuple)
tmp_int_parent_models = []
action_pred_rejection_combination = {}
modes = [Literal.POS, Literal.NEG, Literal.ABS]
for temp_abs_model in int_parent_models:
action_pred = next_pal_tuple[1]
action = next_pal_tuple[0]
ref = next_pal_tuple[2]
# valid_modes = pa_tuple_to_mode_tuple_set[next_pal_tuple]
# partitions stores the partitions for a refinement next_pal_tuple when called on
# a model temp_abs_model
intermediate_models = lattice_node.get_model_partitions(temp_abs_model, action_pred,
ref, action, tuple(modes))
# Run query and discard models here
# Remove all invalid models and store only the valid ones
                if self.data and valid_models is None:
valid_models = self.data["drifted_valid_models"]
valid_models = [i for i in intermediate_models if not i.discarded]
if self.data and self.data["flag_approach"]==0:
valid_models = self.filter_models_by_computing_ModeTuple_for_PA(valid_models, next_pal_tuple, agent_exec)
elif self.data and self.data["flag_approach"]==1:
possible_state_objects = copy.deepcopy(orig_possible_state_object_comb)
valid_models = self.filter_models_for_every_combination(valid_models, action_pred_rejection_combination, possible_state_objects, \
next_pal_tuple, action, action_pred, predicate, ref, po_query_module, lattice_node, agent_exec, refined_for_agent_failure)
elif self.data and self.data["flag_approach"]==2:
if next_pal_tuple in self.data["PALtuples_dropped"]:
valid_models = self.filter_models_by_computing_ModeTuple_for_PA(valid_models, next_pal_tuple, agent_exec)
elif next_pal_tuple in self.data["PALtuples_dropped_no_obs"]:
possible_state_objects = copy.deepcopy(orig_possible_state_object_comb)
valid_models = self.filter_models_for_every_combination(valid_models, action_pred_rejection_combination, possible_state_objects, \
next_pal_tuple, action, action_pred, predicate, ref, po_query_module, lattice_node, agent_exec, refined_for_agent_failure)
else:
# filter models for every combination
possible_state_objects = copy.deepcopy(orig_possible_state_object_comb)
valid_models = self.filter_models_for_every_combination(valid_models, action_pred_rejection_combination, possible_state_objects, \
next_pal_tuple, action, action_pred, predicate, ref, po_query_module, lattice_node, agent_exec, refined_for_agent_failure)
t_valid_models = [i for i in valid_models if not i.discarded]
if refined_for_agent_failure and len(t_valid_models) == 3:
break
tmp_int_parent_models = [i for i in valid_models if not i.discarded]
if len(tmp_int_parent_models) == 1:
break
if refined_for_agent_failure:
int_parent_models = copy.deepcopy(valid_models)
action_preds_list = copy.deepcopy(temp_action_preds_list)
continue
valid_models = [i for i in valid_models if not i.discarded]
if len(valid_models) == 1 and not self.pal_tuple_dict[next_pal_tuple]:
self.fix_pal_tuple(next_pal_tuple, valid_models)
tmp_int_parent_models = [i for i in valid_models if not i.discarded]
int_parent_models = copy.deepcopy(tmp_int_parent_models)
model_level += 1
model_count = 1
total_models = 0
for m in int_parent_models:
model_count += 1
temp_num_models = 0
for key, val in m.actions.items():
for k, v in val.items():
if v[0] in [Literal.AP, Literal.AN, Literal.NP]:
temp_num_models += 1
if v[1] in [Literal.AP, Literal.AN, Literal.NP]:
temp_num_models += 1
total_models += 2 ** temp_num_models
pp = pprint.PrettyPrinter(indent=2)
if len(valid_models) <= 3:
print("Current Model(s): ")
for v in valid_models:
pp.pprint(v.actions)
if self.get_next_pal_tuple(predicate=predicate) is None:
abs_predicates.append(predicate)
# if self.data:
# if valid_models == None:
# valid_models = self.data["drifted_valid_models"]
# if self.data["negative_examples"]==True:
# print(data["actions_with_no_obs"])
# valid_models = self.query_for_actions_with_no_observations(valid_models, agent_exec)
# # filter models for every combination
# possible_state_objects = copy.deepcopy(orig_possible_state_object_comb)
# valid_models = self.filter_models_for_every_combination(valid_models, action_pred_rejection_combination, possible_state_objects, \
# next_pal_tuple, action, action_pred, predicate, ref, po_query_module, lattice_node, agent_exec, refined_for_agent_failure)
num_models = []
model_count_final = 0
for m in int_parent_models:
model_count_final += 1
temp_num_models = 0
for key, val in m.actions.items():
for k, v in val.items():
if v[0] in [Literal.AP, Literal.AN, Literal.NP]:
temp_num_models += 1
if v[1] in [Literal.AP, Literal.AN, Literal.NP]:
temp_num_models += 1
num_models.append(temp_num_models)
total_models = 0
for num in num_models:
total_models += 2 ** num
pp = pprint.PrettyPrinter(indent=2)
print("Predicted Model: ")
for v in valid_models:
pp.pprint(v.actions)
# avg_model_similarity, num_diff = self.get_model_similarity(valid_models)
# print("Avg model similarity: ", avg_model_similarity)
# print("Avg model diff count: ", num_diff)
print("Total Possible Models = " + str(total_models) + str("\n"))
print("Number of times Agent Executed = " + str(agent_exec) + str("\n"))
print("Preprocessing Time = " + str(preprocess_time) + str("\n"))
print("Actual Predicates = " + str(len(self.pred_type_mapping.keys())) + str("\n"))
print("Modified Predicates = " + str(modified_preds_count) + str("\n"))
print("Action Count = " + str(len(self.abstract_model.actions.keys())) + str("\n"))
print("Object Count = " + str(object_count) + str("\n"))
print("Possible Model count = " + str(len(valid_models)))
print("Combinations tried Count = " + str(self.tried_cont))
print("Agent Execution Failed Count = " + str(self.agent_cant_execute))
print("Total Agent Queries = ", self.agent_query_total)
print("Total Unique Queries = ", self.query_new)
print("Repeated Queries = ", self.query_old)
print("Invalid Init State = ", self.invalid_init_state)
# print("No plan found count (q1) = " + str(query_1_failed))
print("Discarded model count = " + str(self.discarded_count))
print("New Discard count = " + str(self.new_discard_count))
print("\n")
return self.agent_query_total, self.query_new, self.agent_cant_execute, self.query_old, (time.time() - self.start_time), self.data_dict, self.PALtuples_fixed_num, self.pal_tuple_order, valid_models, self.data
| 74,300 | 48.933468 | 216 | py |
DAAISy | DAAISy-main/src/interrogation/__init__.py | from .aia import AgentInterrogation
| 36 | 17.5 | 35 | py |
DAAISy | DAAISy-main/src/query/exec_plan.py | #!/usr/local/bin/python3
# encoding: utf-8
import copy
import os
import sys
from src.config import *
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
class ExecutePlan:
"""
    This class executes a plan on a model starting at an initial state.
:param targetModel: an instance of class Model on which plan is to be executed
:type targetModel: object of class Model
:param init_state: Initial state (list of predicates)
:type init_state: list of strs
:param rawPlan: list of actions
:type rawPlan: list of strs
"""
def __init__(self, targetModel, init_state, rawPlan):
"""
This method creates a new instance of ExecutePlan.
"""
self.init_state = []
for p, v in init_state.items():
for items in v:
t_init_state = p
for i in items:
t_init_state += "|" + i
self.init_state.append(t_init_state)
self.tModel = targetModel
self.plan = rawPlan
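    # Lifted predicates carry parameter indices (e.g. "on|0|1"); grounding
    # replaces each index with the corresponding action argument before the
    # predicate is checked against a state.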
def canActionBeApplied(self, actions, state, p, refinement_dict):
plan_split_list = p.split('|')
action_name = plan_split_list[0]
action_params = plan_split_list[1:]
actionPred_original = actions[action_name]
actionPreds = {}
for pred, v in actionPred_original.items():
temp_pred = pred.split("|")[0]
type_pos = pred.rstrip("|").split("|")[1:]
for type_positions in type_pos:
temp_pred += "|" + action_params[int(type_positions)]
if temp_pred in actionPreds.keys():
v1 = actionPreds[temp_pred]
v2 = v
if v1 == [Literal.ABS, Literal.ABS]:
actionPreds[temp_pred] = v2
elif v2 == [Literal.ABS, Literal.ABS]:
actionPreds[temp_pred] = v1
else:
print("Failed in canApplyAction")
return False, None
else:
actionPreds[temp_pred] = v
failed = False
for pred, val in actionPreds.items():
t_value = copy.deepcopy(val)
if (t_value[0] == Literal.AN or t_value[0] == Literal.AP):
t_value[0] = Literal.ABS
elif (t_value[0] == Literal.NP):
t_value[0] = Literal.POS
if ((t_value[0] == Literal.POS) and (pred not in state)) or \
(t_value[0] == Literal.NEG and (pred in state)):
failed = True
pred_params = pred.split("|")[1:]
pred_name = pred.split("|")[0]
for param in pred_params:
indx = action_params.index(param)
if indx != -1:
pred_name += "|" + str(indx)
                if not refinement_dict[(action_name, pred_name, Location.PRECOND)]:
return False, [pred_name, t_value[0]]
else:
continue
return not (failed), None
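    # Applies an action's effects to a state: POS adds the grounded predicate,
    # NEG removes it (if present), ABS leaves the state unchanged.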
def applyAction(self, actions, state, p):
plan_split_list = p.split('|')
action_name = plan_split_list[0]
action_params = plan_split_list[1:]
actionPred_original = actions[action_name]
actionPreds = {}
for pred, v in actionPred_original.items():
temp_pred = pred.split("|")[0]
type_pos = pred.rstrip("|").split("|")[1:]
for type_positions in type_pos:
temp_pred += "|" + action_params[int(type_positions)]
if temp_pred in actionPreds.keys():
v1 = actionPreds[temp_pred]
v2 = v
if v1 == [Literal.ABS, Literal.ABS]:
actionPreds[temp_pred] = v2
elif v2 == [Literal.ABS, Literal.ABS]:
actionPreds[temp_pred] = v1
else:
return False, None
else:
actionPreds[temp_pred] = v
tempState = copy.deepcopy(state)
for pred, val in actionPreds.items():
t_value = copy.deepcopy(val)
if (t_value[1] == Literal.AN or t_value[1] == Literal.AP):
t_value[1] = Literal.ABS
elif (t_value[1] == Literal.NP):
t_value[1] = Literal.POS
if (t_value[1] == Literal.POS):
tempState.add(pred)
elif (t_value[1] == Literal.NEG):
# If it was absent in precondition, we can make it negative.
if pred in tempState:
tempState.remove(pred)
elif (t_value[0] == Literal.ABS):
continue
else:
return False, None
return True, tempState
def execute_plan(self, refinement_dict):
"""
This method calculates the state after a plan is executed.
This only works for add delete lists in preconditions and effects.
"""
actions = self.tModel.actions
actions = {k.lower(): v for k, v in actions.items()}
initialState = set(self.init_state)
currState = copy.deepcopy(initialState)
plan_index = 0
for p in self.plan:
can_apply_action, issue = self.canActionBeApplied(actions, currState, p, refinement_dict)
if can_apply_action:
is_ok, newState = self.applyAction(actions, currState, p)
                if not is_ok:
return False, None, None
currState = copy.deepcopy(newState)
else:
return False, currState, plan_index
plan_index += 1
return True, currState, plan_index
| 5,763 | 33.309524 | 101 | py |
DAAISy | DAAISy-main/src/query/po_query.py | #!/usr/local/bin/python3
# encoding: utf-8
import copy
import os
import subprocess
import sys
from itertools import combinations
from ..config import *
from ..utils import FileUtils
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
from . import genericQuery as gc
class Query(gc.GenericQuery):
"""
This class is for the plan executability query.
Given two models, it finds a plan that will result in both the models
leading to different states.
Initial State has to be given as input.
:param model1: an instance of class Model
:type model1: object of class Model
:param model2: an instance of class Model
:type model2: object of class Model
:param init_state: Initial state (list of predicates)
:type init_state: list of strs
"""
def __init__(self, model1, model2, init_state, next_pal_tuple, pal_tuple_dict):
"""
This method creates a new instance of AR Query.
"""
self.model1 = copy.deepcopy(model1)
self.model2 = copy.deepcopy(model2)
self.init_state = dict()
self.plan = []
self.pal = next_pal_tuple
self.debug_text = ''
self.pal_tuple_dict = pal_tuple_dict
predicates = {}
pred_names_set = set()
for key, value in self.model1.predicates.items():
pred_name = key.split("|")[0]
new_key = key.replace(pred_name, pred_name + "_1")
predicates[new_key] = value
pred_names_set.add(pred_name)
for key, value in self.model2.predicates.items():
pred_name = key.split("|")[0]
new_key = key.replace(pred_name, pred_name + "_2")
predicates[new_key] = value
pred_names_set.add(pred_name)
for i in init_state:
if i in pred_names_set:
self.init_state[i] = init_state[i]
if len(self.init_state) == 0:
self.init_state['empty_init'] = [()]
self.model1.predicates['empty_init'] = 0
self.model2.predicates['empty_init'] = 0
for action in self.model1.actions.keys():
self.model1.actions[action]['empty_init'] = [Literal.POS, Literal.ABS]
for action in self.model2.actions.keys():
                self.model2.actions[action]['empty_init'] = [Literal.POS, Literal.ABS]
def call_planner(self, domain_file, problem_file, result_file):
"""
This method calls the planner.
The planner can be either FF Planner (ff) or Madagascar (mg).
It needs to be set in config.py in the root directory.
:param domain_file: domain file (operator file) for the planner
:type domain_file: str
:param problem_file: problem file (fact file) for the planner
:type problem_file: str
:param result_file: result file to store output of the planner
:type result_file: str
:rtype: None
"""
if PLANNER == "FF":
param = FF_PATH + "ff"
param += " -o " + domain_file
param += " -f " + problem_file
param += " > " + result_file
elif PLANNER == "FD":
param = FD_PATH + "fast-downward.py "
param += " --plan-file ../" + FD_SAS_FILE
param += " --alias seq-sat-lama-2011"
param += " " + domain_file
param += " " + problem_file
# param += " --search \"astar(lmcut(), verbosity=silent)\""
else:
print("Error: No planner provided")
exit()
p = subprocess.Popen([param], shell=True)
p.wait()
if PLANNER == "FD":
f = open("../" + FD_SAS_FILE + ".1", "r")
            _plan_found = False
            _plan = ""
            for x in f:
                if "found legal plan as follows" in x:
                    _plan_found = True
if ";" in x:
continue
if "(" in x and ")" in x:
k = copy.deepcopy(x)
_plan += "|".join(k.lower().rstrip().split()) + ")\n"
if "time spent" in x:
break
f.close()
f = open(result_file, "w")
f.write(_plan)
f.close()
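    # The auxiliary "unknown" predicate is attached to every action and relaxed
    # only for already-refined tuples and the PAL tuple under test, forcing the
    # planner toward plans that exercise that action/predicate combination.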
def add_unknown_pred_to_model(self, model_actions):
temp_actions = copy.deepcopy(model_actions)
for actionName, predicateDict_m1 in temp_actions.items():
if (actionName, self.pal[1], Location.PRECOND) not in self.pal_tuple_dict.keys():
# This predicate and action might be incompatible
continue
predicateDict_m1['unknown'] = [Literal.POS, Literal.POS]
if self.pal_tuple_dict[(actionName, self.pal[1], Location.PRECOND)]:
predicateDict_m1['unknown'][0] = Literal.ABS
if self.pal_tuple_dict[(actionName, self.pal[1], Location.EFFECTS)]:
predicateDict_m1['unknown'][1] = Literal.NEG
# Remove unknown from current pal tuple's a,l
if self.pal[2] == Location.PRECOND:
temp_actions[self.pal[0]]['unknown'][int(self.pal[2]) - 1] = Literal.ABS
elif self.pal[2] == Location.EFFECTS:
temp_actions[self.pal[0]]['unknown'][int(self.pal[2]) - 1] = Literal.NEG
return temp_actions
def add_unknown_predicate(self):
temp_actions_m1 = self.add_unknown_pred_to_model(self.model1.actions)
temp_actions_m2 = self.add_unknown_pred_to_model(self.model2.actions)
return temp_actions_m1, temp_actions_m2
def write_query_to_file(self, fd, domain_name, objects, pred_type_mapping, action_parameters):
"""
This method creates files.
:param fd: file descriptor of the pddl file in which model will be written
:type fd: file descriptor
:param domain_name: domain name of the model
:type domain_name: str
:rtype: None
"""
use_unknown = True
self.write(fd, "(define (domain " + domain_name + ")\n")
self.write(fd, "(:requirements :strips :typing :conditional-effects :equality :negative-preconditions)\n")
####### Typing #######
self.write(fd, "(:types")
for t in objects.keys():
self.write(fd, " " + t)
self.write(fd, ")\n")
self.write(fd, "(:predicates ")
count = 0
preds_printed = []
for key, value in self.model1.predicates.items():
params = ""
cnt = 0
pred_name = key.split("|")[0]
if pred_name != 'empty_init':
for val in pred_type_mapping[pred_name]:
params = params + " ?" + val[0] + str(cnt) + " - " + val
cnt += 1
if count > 0:
self.write(fd, "\n")
for k in range(len("(:predicates ")):
self.write(fd, " ")
if pred_name not in preds_printed:
preds_printed.append(pred_name)
self.write(fd, "(" + pred_name + "_1 " + params + ")")
self.write(fd, "(" + pred_name + "_2 " + params + ")")
count += 1
self.write(fd, "\n")
if use_unknown:
# ADD UNKNOWN
for k in range(len("(:predicates ")):
self.write(fd, " ")
self.write(fd, "(unknown_1)")
self.write(fd, "(unknown_2)\n")
for k in range(len("(:predicates ")):
self.write(fd, " ")
self.write(fd, "(dummy_pred_1)")
self.write(fd, "(dummy_pred_2)")
self.write(fd, ")\n\n")
# Needed to copy because we will add key unknown later.
temp_actions_m1, temp_actions_m2 = self.add_unknown_predicate()
for actionName, predicateDict_m1 in temp_actions_m1.items():
head = "(:action " + actionName + "\n" + " :parameters"
self.write(fd, head)
count = 0
type_count = {}
param_ordering = []
for p in action_parameters[actionName]:
if p not in type_count.keys():
type_count[p] = 1
else:
type_count[p] = type_count[p] + 1
param_ordering.append(p + str(type_count[p]))
self.write(fd, " (")
head = ""
param_count = len(action_parameters[actionName])
for i in range(param_count):
if i > 0:
for k in range(len(" :parameters (")):
head += " "
head += "?" + param_ordering[i] + " - " + action_parameters[actionName][i] + "\n"
for k in range(len(" :parameters ")):
head += " "
head += ")\n"
self.write(fd, head)
count = -1
########### Write Precondition ###########
self.write(fd, " :precondition ")
equality_needed = False
if param_count > 1:
equality_needed = True
if equality_needed:
# Ensure none of the parameters are equal to each other
combs = combinations(list(range(0, param_count)), 2)
self.write(fd, "(and ")
for c in combs:
self.write(fd, "(not (= ")
for j in range(2):
i = c[j]
self.write(fd, "?" + param_ordering[i])
if (j == 0):
self.write(fd, " ")
else:
self.write(fd, ")) ")
self.write(fd, "\n")
for k in range(len(" :precondition (and ")):
self.write(fd, " ")
# Write precondition of M1 and Precondition of M2 in OR
# This ensures the models are distinguished if only one model
# can execute this action
self.write(fd, "(or \n")
for k in range(len(" :precondition (and (or ")):
self.write(fd, " ")
# Write predicate 1
head_m1 = ""
not_head_m1 = ""
for predicate, value in predicateDict_m1.items():
pred_split = predicate.split("|")
pred_name = pred_split[0]
t_value = copy.deepcopy(value)
if (t_value[0] == Literal.AN or t_value[0] == Literal.AP):
t_value[0] = Literal.ABS
elif (t_value[0] == Literal.NP):
t_value[0] = Literal.POS
if (t_value[0] != Literal.ABS) and not (
use_unknown and self.pal[0] == actionName and self.pal[1] == predicate):
param = "("
not_param = "("
if (t_value[0] == Literal.NEG):
param += "not ("
if (t_value[0] == Literal.POS):
not_param += "not ("
param += pred_name + "_1"
not_param += pred_name + "_1"
if len(pred_split) > 1:
pred_params = pred_split[1:]
for p in pred_params:
# print(p)
param += " ?" + param_ordering[int(p)]
not_param += " ?" + param_ordering[int(p)]
param += ")"
not_param += ")"
if (t_value[0] != Literal.ABS and t_value[0] != Literal.POS):
param += ")"
if (t_value[0] != Literal.ABS and t_value[0] != Literal.NEG):
not_param += ")"
for k in range(len(" :precondition (and (or (and ")):
head_m1 += " "
for k in range(len(" :precondition (and (or (and (or ")):
not_head_m1 += " "
head_m1 += param + "\n"
not_head_m1 += not_param + "\n"
elif (use_unknown and self.pal[0] == actionName and self.pal[1] == predicate):
if t_value[0] != Literal.ABS:
# Add +,-,unkn in or
param = "(or ("
not_param = "(or ("
if (t_value[0] == Literal.NEG):
param += "not ("
if (t_value[0] == Literal.POS):
not_param += "not ("
param += pred_name + "_1"
not_param += pred_name + "_1"
if len(pred_split) > 1:
pred_params = pred_split[1:]
for p in pred_params:
# print(p)
param += " ?" + param_ordering[int(p)]
not_param += " ?" + param_ordering[int(p)]
param += ")"
not_param += ")"
for k in range(len(" :precondition (and (or (and ")):
head_m1 += " "
for k in range(len(" :precondition (and (or (and (or ")):
not_head_m1 += " "
if (t_value[0] != Literal.ABS and t_value[0] != Literal.POS):
param += ")"
if (t_value[0] != Literal.ABS and t_value[0] != Literal.NEG):
not_param += ")"
head_m1 += param + "\n"
not_head_m1 += not_param + "\n"
for k in range(len(" :precondition (and (or (and (or ")):
head_m1 += " "
for k in range(len(" :precondition (and (or (and (or (or ")):
not_head_m1 += " "
head_m1 += "(unknown_1)\n"
not_head_m1 += "(unknown_1)\n"
for k in range(len(" :precondition (and (or (and ")):
head_m1 += " "
for k in range(len(" :precondition (and (or (and (or ")):
not_head_m1 += " "
head_m1 += ")\n"
not_head_m1 += ")\n"
self.write(fd, "(and \n" + head_m1)
for k in range(len(" :precondition (and (or ")):
self.write(fd, " ")
self.write(fd, ")\n")
head_m2 = ""
not_head_m2 = ""
predicateDict_m2 = temp_actions_m2[actionName]
for predicate, value in predicateDict_m2.items():
pred_split = predicate.split("|")
pred_name = pred_split[0]
t_value = copy.deepcopy(value)
if (t_value[0] == Literal.AN or t_value[0] == Literal.AP):
t_value[0] = Literal.ABS
elif (t_value[0] == Literal.NP):
t_value[0] = Literal.POS
if (t_value[0] != Literal.ABS) and not (
use_unknown and self.pal[0] == actionName and self.pal[1] == predicate):
not_param = "("
param = "("
if (t_value[0] == Literal.NEG):
param += "not ("
if (t_value[0] == Literal.POS):
not_param += "not ("
param += pred_name + "_2"
not_param += pred_name + "_2"
if len(pred_split) > 1:
pred_params = pred_split[1:]
for p in pred_params:
# print(p)
param += " ?" + param_ordering[int(p)]
not_param += " ?" + param_ordering[int(p)]
param += ")"
not_param += ")"
if (t_value[0] != Literal.ABS and t_value[0] != Literal.POS):
param += ")"
if (t_value[0] != Literal.ABS and t_value[0] != Literal.NEG):
not_param += ")"
for k in range(len(" :precondition (and (or (and ")):
head_m2 += " "
for k in range(len(" :precondition (and (or (and (or ")):
not_head_m2 += " "
head_m2 += param + "\n"
not_head_m2 += not_param + "\n"
elif (use_unknown and self.pal[0] == actionName and self.pal[1] == predicate):
if t_value[0] != Literal.ABS:
# Add +,-,unkn in or
param = "(or ("
not_param = "(or ("
if (t_value[0] == Literal.NEG):
param += "not ("
if (t_value[0] == Literal.POS):
not_param += "not ("
param += pred_name + "_2"
not_param += pred_name + "_2"
if len(pred_split) > 1:
pred_params = pred_split[1:]
for p in pred_params:
param += " ?" + param_ordering[int(p)]
not_param += " ?" + param_ordering[int(p)]
param += ")"
not_param += ")"
for k in range(len(" :precondition (and (or (and ")):
head_m2 += " "
for k in range(len(" :precondition (and (or (and (or ")):
not_head_m2 += " "
if (t_value[0] != Literal.ABS and t_value[0] != Literal.POS):
param += ")"
if (t_value[0] != Literal.ABS and t_value[0] != Literal.NEG):
not_param += ")"
head_m2 += param + "\n"
not_head_m2 += not_param + "\n"
for k in range(len(" :precondition (and (or (and (or ")):
head_m2 += " "
for k in range(len(" :precondition (and (or (and (or (or ")):
not_head_m2 += " "
head_m2 += "(unknown_2)\n"
not_head_m2 += "(unknown_2)\n"
for k in range(len(" :precondition (and (or (and ")):
head_m2 += " "
for k in range(len(" :precondition (and (or (and (or ")):
not_head_m2 += " "
head_m2 += ")\n"
not_head_m2 += ")\n"
for k in range(len(" :precondition (and (or ")):
self.write(fd, " ")
self.write(fd, "(and \n" + head_m2)
for k in range(len(" :precondition (and (or ")):
self.write(fd, " ")
self.write(fd, ")\n")
if equality_needed:
for k in range(len(" :precondition (and ")):
self.write(fd, " ")
self.write(fd, ")\n")
for k in range(len(" :precondition ")):
self.write(fd, " ")
self.write(fd, ")\n")
count = 0
self.write(fd, " :effect (and")
# When (prec(m1)) (eff(m1))
self.write(fd, " (when (and\n")
self.write(fd, head_m1 + head_m2)
for k in range(len(" :effect (and (when ")):
self.write(fd, " ")
self.write(fd, ")\n")
for k in range(len(" :effect (and (when ")):
self.write(fd, " ")
fd.write("(and \n")
for predicate, value in predicateDict_m1.items():
pred_split = predicate.split("|")
pred_name = pred_split[0]
t_value = copy.deepcopy(value)
if (t_value[1] == Literal.AN or t_value[1] == Literal.AP):
t_value[1] = Literal.ABS
elif (t_value[1] == Literal.NP):
t_value[1] = Literal.POS
param = ""
for k in range(len(" :precondition (and (or (and ")):
param += " "
if (t_value[1] != Literal.ABS):
param += "("
if (t_value[1] == Literal.NEG):
param += "not ("
param += pred_name + "_1"
if len(pred_split) > 1:
pred_params = pred_split[1:]
for p in pred_params:
# print(p)
param += " ?" + param_ordering[int(p)]
param += ")"
if (t_value[1] != Literal.ABS and t_value[1] != Literal.POS):
param += ")"
self.write(fd, param + "\n")
for predicate, value in predicateDict_m2.items():
pred_split = predicate.split("|")
pred_name = pred_split[0]
t_value = copy.deepcopy(value)
if (t_value[1] == Literal.AN or t_value[1] == Literal.AP):
t_value[1] = Literal.ABS
elif (t_value[1] == Literal.NP):
t_value[1] = Literal.POS
param = ""
for k in range(len(" :precondition (and (or (and ")):
param += " "
if (t_value[1] != Literal.ABS):
param += "("
if (t_value[1] == Literal.NEG):
param += "not ("
param += pred_name + "_2"
if len(pred_split) > 1:
pred_params = pred_split[1:]
for p in pred_params:
# print(p)
param += " ?" + param_ordering[int(p)]
param += ")"
if (t_value[1] != Literal.ABS and t_value[1] != Literal.POS):
param += ")"
self.write(fd, param + "\n")
for k in range(len(" :precondition (and ")):
self.write(fd, " ")
self.write(fd, ")\n")
for k in range(len(" :effect (and ")):
self.write(fd, " ")
self.write(fd, ")\n")
for k in range(len(" :effect (and ")):
self.write(fd, " ")
self.write(fd, "(when ")
# When (or (!(prec(m1))) (!(prec(m2)))) (create dummy diff)
self.write(fd, "(or \n")
for k in range(len(" :effect (and (when (or ")):
self.write(fd, " ")
self.write(fd, "(and \n" + head_m1)
for k in range(len(" :effect (and (when (or (and ")):
self.write(fd, " ")
self.write(fd, "(or \n" + not_head_m2)
for k in range(len(" :effect (and (when (or (and ")):
self.write(fd, " ")
self.write(fd, ")\n")
for k in range(len(" :effect (and (when (or ")):
self.write(fd, " ")
self.write(fd, ")\n")
for k in range(len(" :effect (and (when (or ")):
self.write(fd, " ")
self.write(fd, "(and \n" + head_m2)
for k in range(len(" :effect (and (when (or (and ")):
self.write(fd, " ")
self.write(fd, "(or \n" + not_head_m1)
for k in range(len(" :effect (and (when (or (and ")):
self.write(fd, " ")
self.write(fd, ")\n")
for k in range(len(" :effect (and (when (or ")):
self.write(fd, " ")
self.write(fd, ")\n")
for k in range(len(" :effect (and (when ")):
self.write(fd, " ")
self.write(fd, ")\n")
for k in range(len(" :effect (and (when ")):
self.write(fd, " ")
self.write(fd, "(and \n")
for k in range(len(" :effect (and (when (and ")):
self.write(fd, " ")
self.write(fd, "(dummy_pred_1)\n")
for k in range(len(" :effect (and (when (and ")):
self.write(fd, " ")
self.write(fd, "(not(dummy_pred_2))\n")
for k in range(len(" :effect (and (when ")):
self.write(fd, " ")
self.write(fd, ") \n")
for k in range(len(" :effect (and ")):
self.write(fd, " ")
self.write(fd, ")\n")
for k in range(len(" :effect ")):
self.write(fd, " ")
self.write(fd, ")\n")
self.write(fd, ")\n\n")
self.write(fd, ")\n")
def write(self, fd, txt):
self.debug_text += txt
fd.write(txt)
def get_plan_from_query(self,
init_state,
domain_name,
objects,
pred_type_mapping,
action_parameters):
f = open(Q_DOMAIN_FILE, "w")
self.write_query_to_file(f, domain_name, objects, pred_type_mapping, action_parameters)
f.close()
f = open(Q_PROBLEM_FILE, "w")
FileUtils.writeProblemToFile(self, f, domain_name, domain_name + "-1", True, objects, pred_type_mapping)
f.close()
self.call_planner(Q_DOMAIN_FILE, Q_PROBLEM_FILE, Q_RESULT_FILE)
self.plan = FileUtils.get_plan_from_file(Q_RESULT_FILE)
planRaw = self.plan
if len(planRaw) != 0:
f = open(Q_PLAN_FILE, "w")
FileUtils.writePlanToFile(self, f, init_state, domain_name, domain_name + "-1", objects)
f.close()
return planRaw
| 26,021 | 39.786834 | 114 | py |
DAAISy | DAAISy-main/src/query/genericQuery.py | #!/usr/local/bin/python3
# encoding: utf-8
class GenericQuery(object):
"""
This class serves as a template for the queries.
Each query class has to inherit this class.
"""
def __init__(self):
print("Generic Query")
def call_planner(self, domain_file, problem_file, result_file):
print("Call the planner")
| 349 | 20.875 | 67 | py |
DAAISy | DAAISy-main/src/query/__init__.py | from .genericQuery import GenericQuery
from .po_query import Query
from .exec_plan import ExecutePlan
| 103 | 19.8 | 38 | py |
EZ-VSL | EZ-VSL-main/test.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import utils
import numpy as np
import argparse
from model import EZVSL
from datasets import get_test_dataset, inverse_normalize
import cv2
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', type=str, default='./checkpoints', help='path to save trained model weights')
parser.add_argument('--experiment_name', type=str, default='ezvsl_vggss', help='experiment name (experiment folder set to "args.model_dir/args.experiment_name")')
parser.add_argument('--save_visualizations', action='store_true', help='Set to store all VSL visualizations (saved in viz directory within experiment folder)')
# Dataset
parser.add_argument('--testset', default='flickr', type=str, help='testset (flickr or vggss)')
parser.add_argument('--test_data_path', default='', type=str, help='Root directory path of data')
parser.add_argument('--test_gt_path', default='', type=str)
parser.add_argument('--batch_size', default=1, type=int, help='Batch Size')
# Model
parser.add_argument('--tau', default=0.03, type=float, help='tau')
parser.add_argument('--out_dim', default=512, type=int)
parser.add_argument('--alpha', default=0.4, type=float, help='alpha')
# Distributed params
parser.add_argument('--workers', type=int, default=8)
parser.add_argument('--gpu', type=int, default=None)
parser.add_argument('--world_size', type=int, default=1)
parser.add_argument('--rank', type=int, default=0)
parser.add_argument('--node', type=str, default='localhost')
parser.add_argument('--port', type=int, default=12345)
parser.add_argument('--dist_url', type=str, default='tcp://localhost:12345')
parser.add_argument('--multiprocessing_distributed', action='store_true')
return parser.parse_args()
def main(args):
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Model dir
model_dir = os.path.join(args.model_dir, args.experiment_name)
viz_dir = os.path.join(model_dir, 'viz')
os.makedirs(viz_dir, exist_ok=True)
# Models
audio_visual_model = EZVSL(args.tau, args.out_dim)
from torchvision.models import resnet18
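# Repurpose an ImageNet-pretrained ResNet-18 as an object-saliency extractor:
# the pooling/FC head is replaced so the channel-wise mean of absolute
# activations of the final 7x7 feature map is returned as the saliency map S_OBJ.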
object_saliency_model = resnet18(pretrained=True)
object_saliency_model.avgpool = nn.Identity()
object_saliency_model.fc = nn.Sequential(
nn.Unflatten(1, (512, 7, 7)),
NormReducer(dim=1),
Unsqueeze(1)
)
if not torch.cuda.is_available():
print('using CPU, this will be slow')
elif args.multiprocessing_distributed:
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
audio_visual_model.cuda(args.gpu)
object_saliency_model.cuda(args.gpu)
audio_visual_model = torch.nn.parallel.DistributedDataParallel(audio_visual_model, device_ids=[args.gpu])
object_saliency_model = torch.nn.parallel.DistributedDataParallel(object_saliency_model, device_ids=[args.gpu])
# Load weights
ckp_fn = os.path.join(model_dir, 'best.pth')
if os.path.exists(ckp_fn):
ckp = torch.load(ckp_fn, map_location='cpu')
audio_visual_model.load_state_dict({k.replace('module.', ''): ckp['model'][k] for k in ckp['model']})
print(f'loaded from {os.path.join(model_dir, "best.pth")}')
else:
print(f"Checkpoint not found: {ckp_fn}")
# Dataloader
testdataset = get_test_dataset(args)
testdataloader = DataLoader(testdataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
print("Loaded dataloader.")
validate(testdataloader, audio_visual_model, object_saliency_model, viz_dir, args)
@torch.no_grad()
def validate(testdataloader, audio_visual_model, object_saliency_model, viz_dir, args):
audio_visual_model.train(False)
object_saliency_model.train(False)
evaluator_av = utils.Evaluator()
evaluator_obj = utils.Evaluator()
evaluator_av_obj = utils.Evaluator()
for step, (image, spec, bboxes, name) in enumerate(testdataloader):
if args.gpu is not None:
spec = spec.cuda(args.gpu, non_blocking=True)
image = image.cuda(args.gpu, non_blocking=True)
# Compute S_AVL
heatmap_av = audio_visual_model(image.float(), spec.float())[1].unsqueeze(1)
heatmap_av = F.interpolate(heatmap_av, size=(224, 224), mode='bilinear', align_corners=True)
heatmap_av = heatmap_av.data.cpu().numpy()
# Compute S_OBJ
img_feat = object_saliency_model(image)
heatmap_obj = F.interpolate(img_feat, size=(224, 224), mode='bilinear', align_corners=True)
heatmap_obj = heatmap_obj.data.cpu().numpy()
# Compute eval metrics and save visualizations
for i in range(spec.shape[0]):
pred_av = utils.normalize_img(heatmap_av[i, 0])
pred_obj = utils.normalize_img(heatmap_obj[i, 0])
pred_av_obj = utils.normalize_img(pred_av * args.alpha + pred_obj * (1 - args.alpha))
gt_map = bboxes['gt_map'].data.cpu().numpy()
thr_av = np.sort(pred_av.flatten())[int(pred_av.shape[0] * pred_av.shape[1] * 0.5)]
evaluator_av.cal_CIOU(pred_av, gt_map, thr_av)
thr_obj = np.sort(pred_obj.flatten())[int(pred_obj.shape[0] * pred_obj.shape[1] * 0.5)]
evaluator_obj.cal_CIOU(pred_obj, gt_map, thr_obj)
thr_av_obj = np.sort(pred_av_obj.flatten())[int(pred_av_obj.shape[0] * pred_av_obj.shape[1] * 0.5)]
evaluator_av_obj.cal_CIOU(pred_av_obj, gt_map, thr_av_obj)
if args.save_visualizations:
denorm_image = inverse_normalize(image).squeeze(0).permute(1, 2, 0).cpu().numpy()[:, :, ::-1]
denorm_image = (denorm_image*255).astype(np.uint8)
cv2.imwrite(os.path.join(viz_dir, f'{name[i]}_image.jpg'), denorm_image)
# visualize bboxes on raw images
gt_boxes_img = utils.visualize(denorm_image, bboxes['bboxes'])
cv2.imwrite(os.path.join(viz_dir, f'{name[i]}_gt_boxes.jpg'), gt_boxes_img)
# visualize heatmaps
heatmap_img = np.uint8(pred_av*255)
heatmap_img = cv2.applyColorMap(heatmap_img[:, :, np.newaxis], cv2.COLORMAP_JET)
fin = cv2.addWeighted(heatmap_img, 0.8, np.uint8(denorm_image), 0.2, 0)
cv2.imwrite(os.path.join(viz_dir, f'{name[i]}_pred_av.jpg'), fin)
heatmap_img = np.uint8(pred_obj*255)
heatmap_img = cv2.applyColorMap(heatmap_img[:, :, np.newaxis], cv2.COLORMAP_JET)
fin = cv2.addWeighted(heatmap_img, 0.8, np.uint8(denorm_image), 0.2, 0)
cv2.imwrite(os.path.join(viz_dir, f'{name[i]}_pred_obj.jpg'), fin)
heatmap_img = np.uint8(pred_av_obj*255)
heatmap_img = cv2.applyColorMap(heatmap_img[:, :, np.newaxis], cv2.COLORMAP_JET)
fin = cv2.addWeighted(heatmap_img, 0.8, np.uint8(denorm_image), 0.2, 0)
cv2.imwrite(os.path.join(viz_dir, f'{name[i]}_pred_av_obj.jpg'), fin)
print(f'{step+1}/{len(testdataloader)}: map_av={evaluator_av.finalize_AP50():.2f} map_obj={evaluator_obj.finalize_AP50():.2f} map_av_obj={evaluator_av_obj.finalize_AP50():.2f}')
def compute_stats(eval):
mAP = eval.finalize_AP50()
ciou = eval.finalize_cIoU()
auc = eval.finalize_AUC()
return mAP, ciou, auc
print('AV: AP50(cIoU)={}, Avg-cIoU={}, AUC={}'.format(*compute_stats(evaluator_av)))
print('Obj: AP50(cIoU)={}, Avg-cIoU={}, AUC={}'.format(*compute_stats(evaluator_obj)))
print('AV_Obj: AP50(cIoU)={}, Avg-cIoU={}, AUC={}'.format(*compute_stats(evaluator_av_obj)))
utils.save_iou(evaluator_av.ciou, 'av', viz_dir)
utils.save_iou(evaluator_obj.ciou, 'obj', viz_dir)
utils.save_iou(evaluator_av_obj.ciou, 'av_obj', viz_dir)
class NormReducer(nn.Module):
def __init__(self, dim):
super(NormReducer, self).__init__()
self.dim = dim
def forward(self, x):
return x.abs().mean(self.dim)
class Unsqueeze(nn.Module):
def __init__(self, dim):
super(Unsqueeze, self).__init__()
self.dim = dim
def forward(self, x):
return x.unsqueeze(self.dim)
if __name__ == "__main__":
main(get_arguments())
| 8,390 | 42.252577 | 185 | py |
EZ-VSL | EZ-VSL-main/audio_io.py | import av
# import torchaudio
import numpy as np
from fractions import Fraction
# def load_audio_torchaudio(fn):
# data, sr = torchaudio.load(fn)
# return data, sr
def open_audio_av(path):
container = av.open(path)
for stream in container.streams.video:
stream.codec_context.thread_type = av.codec.context.ThreadType.NONE
stream.codec_context.thread_count = 1
for stream in container.streams.audio:
stream.codec_context.thread_type = av.codec.context.ThreadType.NONE
stream.codec_context.thread_count = 1
return container
def load_audio_av(path=None, container=None, rate=None, start_time=None, duration=None, layout="mono"):
if container is None:
container = av.open(path)
audio_stream = container.streams.audio[0]
# Parse metadata
_ss = audio_stream.start_time * audio_stream.time_base if audio_stream.start_time is not None else 0.
_dur = audio_stream.duration * audio_stream.time_base
_ff = _ss + _dur
_rate = audio_stream.rate
if rate is None:
rate = _rate
if start_time is None:
start_time = _ss
if duration is None:
duration = _ff - start_time
duration = min(duration, _ff - start_time)
end_time = start_time + duration
resampler = av.audio.resampler.AudioResampler(format="s16p", layout=layout, rate=rate)
# Read data
chunks = []
container.seek(int(start_time * av.time_base))
for frame in container.decode(audio=0):
chunk_start_time = frame.pts * frame.time_base
chunk_end_time = chunk_start_time + Fraction(frame.samples, frame.rate)
if chunk_end_time < start_time: # Skip until start time
continue
if chunk_start_time > end_time: # Exit if clip has been extracted
break
try:
frame.pts = None
if resampler is not None:
chunks.append((chunk_start_time, resampler.resample(frame).to_ndarray()))
else:
chunks.append((chunk_start_time, frame.to_ndarray()))
except AttributeError:
break
# Trim for frame accuracy
audio = np.concatenate([af[1] for af in chunks], 1)
ss = int((start_time - chunks[0][0]) * rate)
t = int(duration * rate)
if ss < 0:
audio = np.pad(audio, ((0, 0), (-ss, 0)), 'constant', constant_values=0)
ss = 0
audio = audio[:, ss: ss+t]
# Normalize to [-1, 1]
audio = audio / np.iinfo(audio.dtype).max
return audio, rate
def audio_info_av(inpt, audio=0, format=None):
container = inpt
if isinstance(inpt, str):
try:
container = av.open(inpt, format=format)
except av.AVError:
return None
audio_stream = container.streams.audio[audio]
time_base = audio_stream.time_base
duration = audio_stream.duration * time_base
start_time = audio_stream.start_time * time_base
channels = audio_stream.channels
fps = audio_stream.rate
chunk_size = audio_stream.frame_size
chunks = audio_stream.frames
meta = {'channels': channels,
'fps': fps,
'start_time': start_time,
'duration': duration,
'chunks': chunks,
'chunk_size': chunk_size}
return meta
| 3,295 | 31 | 105 | py |
EZ-VSL | EZ-VSL-main/utils.py | import os
import json
from torch.optim import *
import numpy as np
from sklearn import metrics
class Evaluator(object):
def __init__(self):
super(Evaluator, self).__init__()
self.ciou = []
def cal_CIOU(self, infer, gtmap, thres=0.01):
infer_map = np.zeros((224, 224))
infer_map[infer >= thres] = 1
ciou = np.sum(infer_map*gtmap) / (np.sum(gtmap) + np.sum(infer_map * (gtmap==0)))
self.ciou.append(ciou)
return ciou, np.sum(infer_map*gtmap), (np.sum(gtmap)+np.sum(infer_map*(gtmap==0)))
def finalize_AUC(self):
cious = [np.sum(np.array(self.ciou) >= 0.05*i) / len(self.ciou)
for i in range(21)]
thr = [0.05*i for i in range(21)]
auc = metrics.auc(thr, cious)
return auc
def finalize_AP50(self):
ap50 = np.mean(np.array(self.ciou) >= 0.5)
return ap50
def finalize_cIoU(self):
ciou = np.mean(np.array(self.ciou))
return ciou
def clear(self):
self.ciou = []
def normalize_img(value, vmax=None, vmin=None):
vmin = value.min() if vmin is None else vmin
vmax = value.max() if vmax is None else vmax
if not (vmax - vmin) == 0:
value = (value - vmin) / (vmax - vmin) # vmin..vmax
return value
def visualize(raw_image, boxes):
import cv2
boxes_img = np.uint8(raw_image.copy())[:, :, ::-1]
for box in boxes:
xmin,ymin,xmax,ymax = int(box[0]),int(box[1]),int(box[2]),int(box[3])
cv2.rectangle(boxes_img[:, :, ::-1], (xmin, ymin), (xmax, ymax), (0,0,255), 1)
return boxes_img[:, :, ::-1]
def build_optimizer_and_scheduler_adam(model, args):
optimizer_grouped_parameters = filter(lambda p: p.requires_grad, model.parameters())
optimizer = Adam(optimizer_grouped_parameters, lr=args.init_lr)
scheduler = None
return optimizer, scheduler
def build_optimizer_and_scheduler_sgd(model, args):
optimizer_grouped_parameters = model.parameters()
optimizer = SGD(optimizer_grouped_parameters, lr=args.init_lr)
scheduler = None
return optimizer, scheduler
def save_json(data, filename, save_pretty=False, sort_keys=False):
with open(filename, mode='w', encoding='utf-8') as f:
if save_pretty:
f.write(json.dumps(data, indent=4, sort_keys=sort_keys))
else:
json.dump(data, f)
def save_iou(iou_list, suffix, output_dir):
# sorted iou
sorted_iou = np.sort(iou_list).tolist()
sorted_iou_indices = np.argsort(iou_list).tolist()
file_iou = open(os.path.join(output_dir,"iou_test_{}.txt".format(suffix)),"w")
for indice, value in zip(sorted_iou_indices, sorted_iou):
line = str(indice) + ',' + str(value) + '\n'
file_iou.write(line)
file_iou.close()
| 2,784 | 29.604396 | 90 | py |
EZ-VSL | EZ-VSL-main/model.py | import torch
from torch import nn
import torch.nn.functional as F
from torchvision.models import resnet18
class EZVSL(nn.Module):
def __init__(self, tau, dim):
super(EZVSL, self).__init__()
self.tau = tau
# Vision model
self.imgnet = resnet18(pretrained=True)
self.imgnet.avgpool = nn.Identity()
self.imgnet.fc = nn.Identity()
self.img_proj = nn.Conv2d(512, dim, kernel_size=(1, 1))
# Audio model
self.audnet = resnet18()
self.audnet.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
self.audnet.avgpool = nn.AdaptiveMaxPool2d((1, 1))
self.audnet.fc = nn.Identity()
self.aud_proj = nn.Linear(512, dim)
# Initialize weights (except pretrained visual model)
for net in [self.audnet, self.img_proj, self.aud_proj]:
for m in net.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.Linear):
nn.init.trunc_normal_(
m.weight, mean=0.0, std=0.01)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.normal_(m.weight, mean=1, std=0.02)
nn.init.constant_(m.bias, 0)
def max_xmil_loss(self, img, aud):
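# Multiple-instance contrastive loss: the similarity maps Slogits have shape
# [B, B, H, W]; max-pooling over the spatial dimensions keeps the strongest
# audio-visual match per pair, and the symmetric cross-entropy pulls matching
# image/audio pairs together while pushing mismatched pairs apart.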
B = img.shape[0]
Slogits = torch.einsum('nchw,mc->nmhw', img, aud) / self.tau
logits = Slogits.flatten(-2, -1).max(dim=-1)[0]
labels = torch.arange(B).long().to(img.device)
loss = F.cross_entropy(logits, labels) + F.cross_entropy(logits.permute(1, 0), labels)
return loss, Slogits
def forward(self, image, audio):
# Image
img = self.imgnet(image).unflatten(1, (512, 7, 7))
img = self.img_proj(img)
img = nn.functional.normalize(img, dim=1)
# Audio
aud = self.audnet(audio)
aud = self.aud_proj(aud)
aud = nn.functional.normalize(aud, dim=1)
# Compute loss
loss, logits = self.max_xmil_loss(img, aud)
# Compute avl maps
with torch.no_grad():
B = img.shape[0]
Savl = logits[torch.arange(B), torch.arange(B)]
return loss, Savl | 2,348 | 35.138462 | 107 | py |
EZ-VSL | EZ-VSL-main/datasets.py | import os
import csv
import numpy as np
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image
from scipy import signal
import random
import json
import xml.etree.ElementTree as ET
from audio_io import load_audio_av, open_audio_av
def load_image(path):
return Image.open(path).convert('RGB')
def load_spectrogram(path, dur=3.):
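# Extracts a `dur`-second window centered in the clip, mixes it down to mono,
# and returns the log-magnitude spectrogram (nperseg=512 -> 257 frequency bins).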
# Load audio
audio_ctr = open_audio_av(path)
audio_dur = audio_ctr.streams.audio[0].duration * audio_ctr.streams.audio[0].time_base
audio_ss = max(float(audio_dur)/2 - dur/2, 0)
audio, samplerate = load_audio_av(container=audio_ctr, start_time=audio_ss, duration=dur)
# To Mono
audio = np.clip(audio, -1., 1.).mean(0)
# Repeat if audio is too short
if audio.shape[0] < samplerate * dur:
n = int(samplerate * dur / audio.shape[0]) + 1
audio = np.tile(audio, n)
audio = audio[:int(samplerate * dur)]
frequencies, times, spectrogram = signal.spectrogram(audio, samplerate, nperseg=512, noverlap=274)
spectrogram = np.log(spectrogram + 1e-7)
return spectrogram
def load_all_bboxes(annotation_dir, format='flickr'):
gt_bboxes = {}
if format == 'flickr':
anno_files = os.listdir(annotation_dir)
for filename in anno_files:
file = filename.split('.')[0]
gt = ET.parse(f"{annotation_dir}/{filename}").getroot()
bboxes = []
for child in gt:
for childs in child:
bbox = []
if childs.tag == 'bbox':
for index, ch in enumerate(childs):
if index == 0:
continue
bbox.append(int(224 * int(ch.text)/256))
bboxes.append(bbox)
gt_bboxes[file] = bboxes
elif format == 'vggss':
with open('metadata/vggss.json') as json_file:
annotations = json.load(json_file)
for annotation in annotations:
bboxes = [(np.clip(np.array(bbox), 0, 1) * 224).astype(int) for bbox in annotation['bbox']]
gt_bboxes[annotation['file']] = bboxes
return gt_bboxes
def bbox2gtmap(bboxes, format='flickr'):
gt_map = np.zeros([224, 224])
for xmin, ymin, xmax, ymax in bboxes:
temp = np.zeros([224, 224])
temp[ymin:ymax, xmin:xmax] = 1
gt_map += temp
if format == 'flickr':
# Annotation consensus
gt_map = gt_map / 2
gt_map[gt_map > 1] = 1
elif format == 'vggss':
# Single annotation
gt_map[gt_map > 0] = 1
return gt_map
class AudioVisualDataset(Dataset):
def __init__(self, image_files, audio_files, image_path, audio_path, audio_dur=3., image_transform=None, audio_transform=None, all_bboxes=None, bbox_format='flickr'):
super().__init__()
self.audio_path = audio_path
self.image_path = image_path
self.audio_dur = audio_dur
self.audio_files = audio_files
self.image_files = image_files
self.all_bboxes = all_bboxes
self.bbox_format = bbox_format
self.image_transform = image_transform
self.audio_transform = audio_transform
def getitem(self, idx):
file = self.image_files[idx]
file_id = file.split('.')[0]
# Image
img_fn = os.path.join(self.image_path, self.image_files[idx])
frame = self.image_transform(load_image(img_fn))
# Audio
audio_fn = os.path.join(self.audio_path, self.audio_files[idx])
spectrogram = self.audio_transform(load_spectrogram(audio_fn))
bboxes = {}
if self.all_bboxes is not None:
bboxes['bboxes'] = self.all_bboxes[file_id]
bboxes['gt_map'] = bbox2gtmap(self.all_bboxes[file_id], self.bbox_format)
return frame, spectrogram, bboxes, file_id
def __len__(self):
return len(self.image_files)
def __getitem__(self, idx):
try:
return self.getitem(idx)
except Exception:
return self.getitem(random.sample(range(len(self)), 1)[0])
def get_train_dataset(args):
audio_path = f"{args.train_data_path}/audio/"
image_path = f"{args.train_data_path}/frames/"
# List directory
audio_files = {fn.split('.wav')[0] for fn in os.listdir(audio_path) if fn.endswith('.wav')}
image_files = {fn.split('.jpg')[0] for fn in os.listdir(image_path) if fn.endswith('.jpg')}
avail_files = audio_files.intersection(image_files)
print(f"{len(avail_files)} available files")
# Subsample if specified
if args.trainset.lower() not in {'vggss', 'flickr'}:
subset = set(open(f"metadata/{args.trainset}.txt").read().splitlines())
avail_files = avail_files.intersection(subset)
print(f"{len(avail_files)} valid subset files")
avail_files = sorted(list(avail_files))
audio_files = sorted([dt+'.wav' for dt in avail_files])
image_files = sorted([dt+'.jpg' for dt in avail_files])
# Transforms
image_transform = transforms.Compose([
transforms.Resize(int(224 * 1.1), Image.BICUBIC),
transforms.RandomCrop((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
audio_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.0], std=[12.0])])
return AudioVisualDataset(
image_files=image_files,
audio_files=audio_files,
image_path=image_path,
audio_path=audio_path,
audio_dur=3.,
image_transform=image_transform,
audio_transform=audio_transform
)
def get_test_dataset(args):
audio_path = args.test_data_path + 'audio/'
image_path = args.test_data_path + 'frames/'
if args.testset == 'flickr':
testcsv = 'metadata/flickr_test.csv'
elif args.testset == 'vggss':
testcsv = 'metadata/vggss_test.csv'
elif args.testset == 'vggss_heard':
testcsv = 'metadata/vggss_heard_test.csv'
elif args.testset == 'vggss_unheard':
testcsv = 'metadata/vggss_unheard_test.csv'
else:
raise NotImplementedError
bbox_format = {'flickr': 'flickr',
'vggss': 'vggss',
'vggss_heard': 'vggss',
'vggss_unheard': 'vggss'}[args.testset]
# Retrieve list of audio and video files
testset = set([item[0] for item in csv.reader(open(testcsv))])
# Intersect with available files
audio_files = {fn.split('.wav')[0] for fn in os.listdir(audio_path)}
image_files = {fn.split('.jpg')[0] for fn in os.listdir(image_path)}
avail_files = audio_files.intersection(image_files)
testset = testset.intersection(avail_files)
testset = sorted(list(testset))
image_files = [dt+'.jpg' for dt in testset]
audio_files = [dt+'.wav' for dt in testset]
# Bounding boxes
all_bboxes = load_all_bboxes(args.test_gt_path, format=bbox_format)
# Transforms
image_transform = transforms.Compose([
transforms.Resize((224, 224), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
audio_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=[0.0], std=[12.0])])
return AudioVisualDataset(
image_files=image_files,
audio_files=audio_files,
image_path=image_path,
audio_path=audio_path,
audio_dur=3.,
image_transform=image_transform,
audio_transform=audio_transform,
all_bboxes=all_bboxes,
bbox_format=bbox_format
)
def inverse_normalize(tensor):
inverse_mean = [-0.485/0.229, -0.456/0.224, -0.406/0.225]
inverse_std = [1.0/0.229, 1.0/0.224, 1.0/0.225]
tensor = transforms.Normalize(inverse_mean, inverse_std)(tensor)
return tensor
| 8,126 | 33.004184 | 170 | py |
EZ-VSL | EZ-VSL-main/train.py | import os
import argparse
import builtins
import time
import numpy as np
import torch
import torch.nn.functional as F
from torch import multiprocessing as mp
import torch.distributed as dist
import utils
from model import EZVSL
from datasets import get_train_dataset, get_test_dataset
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', type=str, default='./checkpoints', help='path to save trained model weights')
parser.add_argument('--experiment_name', type=str, default='ezvsl_vggss', help='experiment name (used for checkpointing and logging)')
# Data params
parser.add_argument('--trainset', default='vggss', type=str, help='trainset (flickr or vggss)')
parser.add_argument('--testset', default='vggss', type=str, help='testset (flickr or vggss)')
parser.add_argument('--train_data_path', default='', type=str, help='Root directory path of train data')
parser.add_argument('--test_data_path', default='', type=str, help='Root directory path of test data')
parser.add_argument('--test_gt_path', default='', type=str)
# ez-vsl hyper-params
parser.add_argument('--out_dim', default=512, type=int)
parser.add_argument('--tau', default=0.03, type=float, help='tau')
# training/evaluation parameters
parser.add_argument("--epochs", type=int, default=20, help="number of epochs")
parser.add_argument('--batch_size', default=128, type=int, help='Batch Size')
parser.add_argument("--init_lr", type=float, default=0.0001, help="initial learning rate")
parser.add_argument("--seed", type=int, default=12345, help="random seed")
# Distributed params
parser.add_argument('--workers', type=int, default=8)
parser.add_argument('--gpu', type=int, default=None)
parser.add_argument('--world_size', type=int, default=1)
parser.add_argument('--rank', type=int, default=0)
parser.add_argument('--node', type=str, default='localhost')
parser.add_argument('--port', type=int, default=12345)
parser.add_argument('--dist_url', type=str, default='tcp://localhost:12345')
parser.add_argument('--multiprocessing_distributed', action='store_true')
return parser.parse_args()
def main(args):
mp.set_start_method('spawn')
args.dist_url = f'tcp://{args.node}:{args.port}'
print('Using url {}'.format(args.dist_url))
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
args.world_size = ngpus_per_node
mp.spawn(main_worker,
nprocs=ngpus_per_node,
args=(ngpus_per_node, args))
else:
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
# suppress printing if not first GPU on each node
if args.multiprocessing_distributed and (args.gpu != 0 or args.rank != 0):
def print_pass(*args, **kwargs):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
# Setup distributed environment
if args.multiprocessing_distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend='nccl', init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
# Create model dir
model_dir = os.path.join(args.model_dir, args.experiment_name)
os.makedirs(model_dir, exist_ok=True)
utils.save_json(vars(args), os.path.join(model_dir, 'configs.json'), sort_keys=True, save_pretty=True)
# Create model
model = EZVSL(args.tau, args.out_dim)
if not torch.cuda.is_available():
print('using CPU, this will be slow')
elif args.multiprocessing_distributed:
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / args.world_size)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
print(model)
# Optimizer
optimizer, scheduler = utils.build_optimizer_and_scheduler_adam(model, args)
# Resume if possible
start_epoch, best_cIoU, best_Auc = 0, 0., 0.
if os.path.exists(os.path.join(model_dir, 'latest.pth')):
ckp = torch.load(os.path.join(model_dir, 'latest.pth'), map_location='cpu')
start_epoch, best_cIoU, best_Auc = ckp['epoch'], ckp['best_cIoU'], ckp['best_Auc']
model.load_state_dict(ckp['model'])
optimizer.load_state_dict(ckp['optimizer'])
print(f'loaded from {os.path.join(model_dir, "latest.pth")}')
# Dataloaders
traindataset = get_train_dataset(args)
train_sampler = None
if args.multiprocessing_distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(traindataset)
train_loader = torch.utils.data.DataLoader(
traindataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=False, sampler=train_sampler, drop_last=True,
persistent_workers=args.workers > 0)
testdataset = get_test_dataset(args)
test_loader = torch.utils.data.DataLoader(
testdataset, batch_size=1, shuffle=False,
num_workers=args.workers, pin_memory=False, drop_last=False,
persistent_workers=args.workers > 0)
print("Loaded dataloader.")
# =============================================================== #
# Training loop
cIoU, auc = validate(test_loader, model, args)
print(f'cIoU (epoch {start_epoch}): {cIoU}')
print(f'AUC (epoch {start_epoch}): {auc}')
print(f'best_cIoU: {best_cIoU}')
print(f'best_Auc: {best_Auc}')
for epoch in range(start_epoch, args.epochs):
if args.multiprocessing_distributed:
train_loader.sampler.set_epoch(epoch)
# Train
train(train_loader, model, optimizer, epoch, args)
# Evaluate
cIoU, auc = validate(test_loader, model, args)
print(f'cIoU (epoch {epoch+1}): {cIoU}')
print(f'AUC (epoch {epoch+1}): {auc}')
print(f'best_cIoU: {best_cIoU}')
print(f'best_Auc: {best_Auc}')
# Checkpoint
if args.rank == 0:
ckp = {'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch+1,
'best_cIoU': best_cIoU,
'best_Auc': best_Auc}
torch.save(ckp, os.path.join(model_dir, 'latest.pth'))
print(f"Model saved to {model_dir}")
if cIoU >= best_cIoU:
best_cIoU, best_Auc = cIoU, auc
if args.rank == 0:
torch.save(ckp, os.path.join(model_dir, 'best.pth'))
def train(train_loader, model, optimizer, epoch, args):
model.train()
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
loss_mtr = AverageMeter('Loss', ':.3f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, loss_mtr],
prefix="Epoch: [{}]".format(epoch),
)
end = time.time()
for i, (image, spec, _, _) in enumerate(train_loader):
data_time.update(time.time() - end)
if args.gpu is not None:
spec = spec.cuda(args.gpu, non_blocking=True)
image = image.cuda(args.gpu, non_blocking=True)
loss, _ = model(image.float(), spec.float())
loss_mtr.update(loss.item(), image.shape[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if i % 10 == 0 or i == len(train_loader) - 1:
progress.display(i)
del loss
def validate(test_loader, model, args):
model.train(False)
evaluator = utils.Evaluator()
for step, (image, spec, bboxes, _) in enumerate(test_loader):
if torch.cuda.is_available():
spec = spec.cuda(args.gpu, non_blocking=True)
image = image.cuda(args.gpu, non_blocking=True)
avl_map = model(image.float(), spec.float())[1].unsqueeze(1)
avl_map = F.interpolate(avl_map, size=(224, 224), mode='bicubic', align_corners=False)
avl_map = avl_map.data.cpu().numpy()
for i in range(spec.shape[0]):
pred = utils.normalize_img(avl_map[i, 0])
gt_map = bboxes['gt_map'].data.cpu().numpy()
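# Binarize at the median activation: the top-50% most active pixels form the predicted region.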
thr = np.sort(pred.flatten())[int(pred.shape[0] * pred.shape[1] / 2)]
evaluator.cal_CIOU(pred, gt_map, thr)
cIoU = evaluator.finalize_AP50()
AUC = evaluator.finalize_AUC()
return cIoU, AUC
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix="", fp=None):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
self.fp = fp
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
msg = '\t'.join(entries)
print(msg, flush=True)
if self.fp is not None:
self.fp.write(msg+'\n')
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
if __name__ == "__main__":
main(get_arguments())
| 10,736 | 36.152249 | 138 | py |
CLNet | CLNet-main/main.py | import torch
import torch.nn as nn
from utils.parser import args
from utils import logger, Trainer, Tester
from utils import init_device, init_model, FakeLR, WarmUpCosineAnnealingLR
from dataset import Cost2100DataLoader
def main():
logger.info('=> PyTorch Version: {}'.format(torch.__version__))
# Environment initialization
device, pin_memory = init_device(args.seed, args.cpu, args.gpu, args.cpu_affinity)
# Create the data loader
train_loader, val_loader, test_loader = Cost2100DataLoader(
root=args.data_dir,
batch_size=args.batch_size,
num_workers=args.workers,
pin_memory=pin_memory,
scenario=args.scenario)()
# Define model
model = init_model(args)
model.to(device)
# Define loss function
criterion = nn.MSELoss().to(device)
# Inference mode
if args.evaluate:
Tester(model, device, criterion)(test_loader)
return
# Define optimizer and scheduler
lr_init = 1e-3 if args.scheduler == 'const' else 2e-3
optimizer = torch.optim.Adam(model.parameters(), lr_init)
if args.scheduler == 'const':
scheduler = FakeLR(optimizer=optimizer)
else:
scheduler = WarmUpCosineAnnealingLR(optimizer=optimizer,
T_max=args.epochs * len(train_loader),
T_warmup=30 * len(train_loader),
eta_min=5e-5)
# Define the training pipeline
trainer = Trainer(model=model,
device=device,
optimizer=optimizer,
criterion=criterion,
scheduler=scheduler,
resume=args.resume,
chk_name=str(args.scenario)+'_'+str(args.cr))
# Start training
trainer.loop(args.epochs, train_loader, val_loader, test_loader)
best = 0
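# NMSE is reported in dB, so a trained model typically scores below 0 and triggers the save below.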
# Final testing
loss, rho, nmse = Tester(model, device, criterion)(test_loader)
print(f"\n=! Final test loss: {loss:.3e}"
f"\n test rho: {rho:.3e}"
f"\n test NMSE: {nmse:.3e}\n")
if nmse < best:
# model save
# save encoder
modelSave1 = './Modelsave/32encoder.pth.tar'
try:
torch.save({'state_dict': model.encoder.state_dict(), }, modelSave1)
except:
torch.save({'state_dict': model.module.encoder.state_dict(), }, modelSave1)
# save decoder
modelSave2 = './Modelsave/32decoder.pth.tar'
try:
torch.save({'state_dict': model.decoder.state_dict(), }, modelSave2)
except:
torch.save({'state_dict': model.module.decoder.state_dict(), }, modelSave2)
print('Model saved!')
best = nmse
if __name__ == "__main__":
main()
| 2,892 | 31.505618 | 91 | py |
CLNet | CLNet-main/dataset/cost2100.py | import os
import numpy as np
import scipy.io as sio
import torch
from torch.utils.data import DataLoader, TensorDataset
__all__ = ['Cost2100DataLoader', 'PreFetcher']
class PreFetcher:
r""" Data pre-fetcher to accelerate the data loading
"""
def __init__(self, loader):
self.ori_loader = loader
self.len = len(loader)
self.stream = torch.cuda.Stream()
self.next_input = None
def preload(self):
try:
self.next_input = next(self.loader)
except StopIteration:
self.next_input = None
return
with torch.cuda.stream(self.stream):
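# Host->device copies run on a side stream so they overlap with compute on the default stream.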
for idx, tensor in enumerate(self.next_input):
self.next_input[idx] = tensor.cuda(non_blocking=True)
def __len__(self):
return self.len
def __iter__(self):
self.loader = iter(self.ori_loader)
self.preload()
return self
def __next__(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
if input is None:
raise StopIteration
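# record_stream marks these tensors as in use on the current stream, so the
# caching allocator will not recycle memory allocated on the side stream too early.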
for tensor in input:
tensor.record_stream(torch.cuda.current_stream())
self.preload()
return input
class Cost2100DataLoader(object):
r""" PyTorch DataLoader for COST2100 dataset.
"""
def __init__(self, root, batch_size, num_workers, pin_memory, scenario):
assert os.path.isdir(root)
assert scenario in {"in", "out"}
self.batch_size = batch_size
self.num_workers = num_workers
self.pin_memory = pin_memory
dir_train = os.path.join(root, f"DATA_Htrain{scenario}.mat")
dir_val = os.path.join(root, f"DATA_Hval{scenario}.mat")
dir_test = os.path.join(root, f"DATA_Htest{scenario}.mat")
dir_raw = os.path.join(root, f"DATA_HtestF{scenario}_all.mat")
channel, nt, nc, nc_expand = 2, 32, 32, 125
# Training data loading
data_train = sio.loadmat(dir_train)['HT']
data_train = torch.tensor(data_train, dtype=torch.float32).view(
data_train.shape[0], channel, nt, nc)
self.train_dataset = TensorDataset(data_train)
# Validation data loading
data_val = sio.loadmat(dir_val)['HT']
data_val = torch.tensor(data_val, dtype=torch.float32).view(
data_val.shape[0], channel, nt, nc)
self.val_dataset = TensorDataset(data_val)
# Test data loading, including the sparse data and the raw data
data_test = sio.loadmat(dir_test)['HT']
data_test = torch.tensor(data_test, dtype=torch.float32).view(
data_test.shape[0], channel, nt, nc)
raw_test = sio.loadmat(dir_raw)['HF_all']
real = torch.tensor(np.real(raw_test), dtype=torch.float32)
imag = torch.tensor(np.imag(raw_test), dtype=torch.float32)
raw_test = torch.cat((real.view(raw_test.shape[0], nt, nc_expand, 1),
imag.view(raw_test.shape[0], nt, nc_expand, 1)), dim=3)
self.test_dataset = TensorDataset(data_test, raw_test)
def __call__(self):
train_loader = DataLoader(self.train_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=True)
val_loader = DataLoader(self.val_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=False)
test_loader = DataLoader(self.test_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=False)
# Accelerate CUDA data loading with pre-fetcher if GPU is used.
if self.pin_memory is True:
train_loader = PreFetcher(train_loader)
val_loader = PreFetcher(val_loader)
test_loader = PreFetcher(test_loader)
return train_loader, val_loader, test_loader
| 4,282 | 35.922414 | 85 | py |
CLNet | CLNet-main/dataset/__init__.py | from .cost2100 import Cost2100DataLoader
| 41 | 20 | 40 | py |
CLNet | CLNet-main/models/clnet.py | r""" The proposed CLNet
"""
import torch
import torch.nn as nn
from collections import OrderedDict
import torch.nn.functional as F
from utils import logger
__all__ = ["clnet"]
class ConvBN(nn.Sequential):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, groups=1):
if not isinstance(kernel_size, int):
padding = [(i - 1) // 2 for i in kernel_size]
else:
padding = (kernel_size - 1) // 2
super(ConvBN, self).__init__(OrderedDict([
('conv', nn.Conv2d(in_planes, out_planes, kernel_size, stride,
padding=padding, groups=groups, bias=False)),
('bn', nn.BatchNorm2d(out_planes))
]))
class CRBlock(nn.Module):
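# Residual block from CRNet: two parallel multi-resolution paths (a 3x3 conv
# followed by factorized 1x3/3x1 convs vs. factorized 1x5/5x1 convs) are
# concatenated and fused by a 1x1 conv, with an identity shortcut.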
def __init__(self):
super(CRBlock, self).__init__()
self.path1 = nn.Sequential(OrderedDict([
('conv3x3', ConvBN(2, 7, 3)),
('relu1', nn.LeakyReLU(negative_slope=0.3, inplace=True)),
('conv1x3', ConvBN(7, 7, [1, 3])),
('relu2', nn.LeakyReLU(negative_slope=0.3, inplace=True)),
('conv3x1', ConvBN(7, 7, [3, 1])),
]))
self.path2 = nn.Sequential(OrderedDict([
('conv1x5', ConvBN(2, 7, [1, 5])),
('relu', nn.LeakyReLU(negative_slope=0.3, inplace=True)),
('conv5x1', ConvBN(7, 7, [5, 1])),
]))
self.conv1x1 = ConvBN(7 * 2, 2, 1)
self.identity = nn.Identity()
self.relu = nn.LeakyReLU(negative_slope=0.3, inplace=True)
def forward(self, x):
identity = self.identity(x)
out1 = self.path1(x)
out2 = self.path2(x)
out = torch.cat((out1, out2), dim=1)
out = self.relu(out)
out = self.conv1x1(out)
out = self.relu(out + identity)
return out
class hsigmoid(nn.Module):
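# Hard sigmoid: relu6(x + 3) / 6, a cheap piecewise-linear approximation of the
# sigmoid bounded in [0, 1] (as popularized by MobileNetV3).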
def forward(self, x):
out = F.relu6(x + 3, inplace=True) / 6
return out
class BasicConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
super(BasicConv, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_planes,eps=1e-5, momentum=0.01, affine=True) if bn else None
self.relu = nn.ReLU() if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class ChannelPool(nn.Module):
def forward(self, x):
return torch.cat( (torch.max(x,1)[0].unsqueeze(1), torch.mean(x,1).unsqueeze(1)), dim=1 )
class SpatialGate(nn.Module):
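# CBAM-style spatial attention: channel-wise max and mean maps are stacked,
# fused by a small conv, and the sigmoid output rescales the input per pixel.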
def __init__(self):
super(SpatialGate, self).__init__()
kernel_size = 3
self.compress = ChannelPool()
self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size-1) // 2, relu=False)
def forward(self, x):
x_compress = self.compress(x)
x_out = self.spatial(x_compress)
scale = torch.sigmoid(x_out) # broadcasting
return x * scale
class SELayer(nn.Module):
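# Squeeze-and-Excitation channel attention: global average pooling followed by
# a two-layer bottleneck MLP produces per-channel gates in [0, 1].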
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
class Encoder(nn.Module):
def __init__(self, reduction=4):
super(Encoder, self).__init__()
total_size, in_channel, w, h = 2048, 2, 32, 32
self.encoder1 = nn.Sequential(OrderedDict([
("conv3x3_bn", ConvBN(in_channel, 2, 3)),
("relu1", nn.LeakyReLU(negative_slope=0.3, inplace=True)),
("conv1x9_bn", ConvBN(2, 2, [1, 9])),
("relu2", nn.LeakyReLU(negative_slope=0.3, inplace=True)),
("conv9x1_bn", ConvBN(2, 2, [9, 1])),
]))
self.encoder2 = ConvBN(in_channel, 32, 1)
self.encoder_conv = nn.Sequential(OrderedDict([
("relu1", nn.LeakyReLU(negative_slope=0.3, inplace=True)),
("conv1x1_bn", ConvBN(34, 2, 1)),
("relu2", nn.LeakyReLU(negative_slope=0.3, inplace=True)),
]))
self.sa = SpatialGate()
self.se = SELayer(32)
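# Acting on the [N, 2048, 1] flattened codeword, this 1x1 Conv1d is equivalent
# to the usual fully-connected compression layer (2048 -> 2048/reduction).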
self.replace_efc = nn.Conv1d(total_size, total_size // reduction, 1)
def forward(self, x):
n, c, h, w = x.detach().size()
encode1 = self.encoder1(x)
encode1 = self.sa(encode1)
encode2 = self.encoder2(x)
encode2 = self.se(encode2)
out = torch.cat((encode1, encode2), dim=1)
out = self.encoder_conv(out)
out = out.view(n, -1)
out = out.unsqueeze(2) #[1,2048,1]
out = self.replace_efc(out) # [1,2048/cr,1]
return out
class Decoder(nn.Module):
def __init__(self, reduction=4):
super(Decoder, self).__init__()
total_size, in_channel, w, h = 2048, 2, 32, 32
self.replace_dfc = nn.ConvTranspose1d(total_size // reduction, total_size, 1)
decoder = OrderedDict([
("conv5x5_bn", ConvBN(2, 2, 5)),
("relu", nn.LeakyReLU(negative_slope=0.3, inplace=True)),
("CRBlock1", CRBlock()),
("CRBlock2", CRBlock())
])
self.decoder_feature = nn.Sequential(decoder)
self.sigmoid = nn.Sigmoid()
self.hsig = hsigmoid()
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.Linear)):
nn.init.xavier_uniform_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
c,h,w = 2,32,32
out = self.replace_dfc(x) # [1,2048,1]
out = out.view(-1, c, h, w) #
out = self.decoder_feature(out)
out = self.hsig(out)
return out
class CLNet(nn.Module):
def __init__(self, reduction=4):
super(CLNet, self).__init__()
total_size, in_channel, w, h = 2048, 2, 32, 32
logger.info(f'reduction={reduction}')
self.encoder = Encoder(reduction)
self.decoder = Decoder(reduction)
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.Linear)):
nn.init.xavier_uniform_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
feature= self.encoder(x)
out = self.decoder(feature)
return out
def clnet(reduction=4):
r""" Create a proposed CLNet.
:param reduction: the reciprocal of compression ratio
:return: an instance of CLNet
"""
model = CLNet(reduction=reduction)
return model
| 7,266 | 32.957944 | 154 | py |
CLNet | CLNet-main/models/__init__.py | from .clnet import *
| 21 | 10 | 20 | py |
CLNet | CLNet-main/utils/parser.py | import argparse
parser = argparse.ArgumentParser(description='CRNet PyTorch Training')
# ========================== Indispensable arguments ==========================
parser.add_argument('--data-dir', type=str, required=True,
help='the path of dataset.')
parser.add_argument('--scenario', type=str, required=True, choices=["in", "out"],
help="the channel scenario")
parser.add_argument('-b', '--batch-size', type=int, required=True, metavar='N',
help='mini-batch size')
parser.add_argument('-j', '--workers', type=int, metavar='N', required=True,
help='number of data loading workers')
# ============================= Optical arguments =============================
# Working mode arguments
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', type=str, default=None,
help='using locally pre-trained model. The path of pre-trained model should be given')
parser.add_argument('--resume', type=str, metavar='PATH', default=None,
help='path to latest checkpoint (default: none)')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--cpu', action='store_true',
help='disable GPU training (default: False)')
parser.add_argument('--cpu-affinity', default=None, type=str,
help='CPU affinity, like "0xffff"')
# Other arguments
parser.add_argument('--epochs', type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--cr', metavar='N', type=int, default=4,
help='compression ratio')
parser.add_argument('--scheduler', type=str, default='const', choices=['const', 'cosine'],
help='learning rate scheduler')
args = parser.parse_args()
| 2,082 | 45.288889 | 106 | py |
CLNet | CLNet-main/utils/statics.py | import torch
from packaging import version
__all__ = ['AverageMeter', 'evaluator']
class AverageMeter(object):
r"""Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
def __init__(self, name):
self.reset()
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.name = name
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __repr__(self):
return f"==> For {self.name}: sum={self.sum}; avg={self.avg}"
def evaluator(sparse_pred, sparse_gt, raw_gt):
r""" Evaluation of decoding implemented in PyTorch Tensor
Computes normalized mean square error (NMSE) and rho.
"""
with torch.no_grad():
# Basic params
nt = 32
nc = 32
nc_expand = 257
# De-centralize
sparse_gt = sparse_gt - 0.5
sparse_pred = sparse_pred - 0.5
# Calculate the NMSE
power_gt = sparse_gt[:, 0, :, :] ** 2 + sparse_gt[:, 1, :, :] ** 2
difference = sparse_gt - sparse_pred
mse = difference[:, 0, :, :] ** 2 + difference[:, 1, :, :] ** 2
nmse = 10 * torch.log10((mse.sum(dim=[1, 2]) / power_gt.sum(dim=[1, 2])).mean())
# Calculate the Rho
n = sparse_pred.size(0)
sparse_pred = sparse_pred.permute(0, 2, 3, 1) # Move the real/imaginary dim to the last
zeros = sparse_pred.new_zeros((n, nt, nc_expand - nc, 2))
        # For PyTorch versions above 1.7.0, the complex representation changes from a stacked [a, b] pair to a native complex number a + bj
if version.parse(torch.__version__) > version.parse("1.7.0"):
sparse_pred = torch.view_as_complex(torch.cat((sparse_pred, zeros), dim=2))
raw_pred = torch.view_as_real(torch.fft.fft(sparse_pred))[:, :, :125, :]
else:
sparse_pred = torch.cat((sparse_pred, zeros), dim=2)
raw_pred = torch.fft(sparse_pred, signal_ndim=1)[:, :, :125, :]
norm_pred = raw_pred[..., 0] ** 2 + raw_pred[..., 1] ** 2
norm_pred = torch.sqrt(norm_pred.sum(dim=1))
norm_gt = raw_gt[..., 0] ** 2 + raw_gt[..., 1] ** 2
norm_gt = torch.sqrt(norm_gt.sum(dim=1))
real_cross = raw_pred[..., 0] * raw_gt[..., 0] + raw_pred[..., 1] * raw_gt[..., 1]
real_cross = real_cross.sum(dim=1)
imag_cross = raw_pred[..., 0] * raw_gt[..., 1] - raw_pred[..., 1] * raw_gt[..., 0]
imag_cross = imag_cross.sum(dim=1)
norm_cross = torch.sqrt(real_cross ** 2 + imag_cross ** 2)
rho = (norm_cross / (norm_pred * norm_gt)).mean()
return rho, nmse
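# A minimal sanity-check sketch (not part of the original file), assuming
# PyTorch >= 1.7: a lightly perturbed prediction should yield a strongly
# negative NMSE (in dB); raw_gt is random here, so rho is exercised for
# shape only, not for a meaningful value.
if __name__ == "__main__":
    gt = torch.rand(4, 2, 32, 32)
    pred = gt + 0.01 * torch.randn_like(gt)
    raw = torch.randn(4, 32, 125, 2)
    rho, nmse = evaluator(pred, gt, raw)
    print(f"rho={rho.item():.4f}, NMSE={nmse.item():.2f} dB")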
| 2,882 | 34.158537 | 111 | py |
CLNet | CLNet-main/utils/logger.py | from datetime import datetime
import sys
import traceback
DEBUG = -1
INFO = 0
EMPH = 1
WARNING = 2
ERROR = 3
FATAL = 4
log_level = INFO
line_seg = ''.join(['*'] * 65)
class LoggerFatalError(SystemExit):
pass
def _format(level, messages):
timestr = datetime.strftime(datetime.now(), '%m.%d/%H:%M')
father = traceback.extract_stack()[-4]
func_info = f'{father[0].split("/")[-1]}:{str(father[1]).ljust(4, " ")}'
m = ' '.join(map(str, messages))
msg = f'{level} {timestr} {func_info}] {m}'
return msg
_log_file = None
_log_buffer = []
_RED = '\033[0;31m'
_GREEN = '\033[1;32m'
_LIGHT_RED = '\033[1;31m'
_ORANGE = '\033[0;33m'
_YELLOW = '\033[1;33m'
_NC = '\033[0m' # No Color
def set_file(fname):
global _log_file
global _log_buffer
if _log_file is not None:
warning("Change log file to %s" % fname)
_log_file.close()
_log_file = open(fname, 'w')
if len(_log_buffer):
for s in _log_buffer:
_log_file.write(s)
_log_file.flush()
def debug(*messages, file=None):
if log_level > DEBUG:
return
msg = _format('D', messages)
if file is None:
sys.stdout.write(_YELLOW + msg + _NC + '\n')
sys.stdout.flush()
else:
with open(file, 'a+') as f:
print(msg, file=f)
def info(*messages, file=None):
if log_level > INFO:
return
msg = _format('I', messages)
if file is None:
sys.stdout.write(msg + '\n')
sys.stdout.flush()
else:
with open(file, 'a+') as f:
print(msg, file=f)
def emph(*messages, file=None):
if log_level > EMPH:
return
msg = _format('EM', messages)
if file is None:
sys.stdout.write(_GREEN + msg + _NC + '\n')
sys.stdout.flush()
else:
with open(file, 'a+') as f:
print(msg, file=f)
def warning(*messages, file=None):
if log_level > WARNING:
return
msg = _format('W', messages)
if file is None:
sys.stderr.write(_ORANGE + msg + _NC + '\n')
sys.stderr.flush()
else:
with open(file, 'a+') as f:
print(msg, file=f)
def error(*messages, file=None):
if log_level > ERROR:
return
msg = _format('E', messages)
if file is None:
sys.stderr.write(_RED + msg + _NC + '\n')
sys.stderr.flush()
else:
with open(file, 'a+') as f:
print(msg, file=f)
def fatal(*messages, file=None):
if log_level > FATAL:
return
msg = _format('F', messages)
if file is None:
sys.stderr.write(_LIGHT_RED + msg + _NC + '\n')
sys.stderr.flush()
else:
with open(file, 'a+') as f:
print(msg, file=f)
raise LoggerFatalError(-1)
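# A minimal usage sketch (not part of the original file): the formatter inspects
# the call stack, so the helpers must be called from inside a function rather
# than at module level.
if __name__ == "__main__":
    def _demo():
        info("training started")
        emph("best NMSE so far:", -12.3)
        warning("falling back to CPU")
    _demo()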
| 2,765 | 21.128 | 76 | py |
CLNet | CLNet-main/utils/scheduler.py | import math
from torch.optim.lr_scheduler import _LRScheduler
__all__ = ['WarmUpCosineAnnealingLR', 'FakeLR']
class WarmUpCosineAnnealingLR(_LRScheduler):
def __init__(self, optimizer, T_max, T_warmup, eta_min=0, last_epoch=-1):
self.T_max = T_max
self.T_warmup = T_warmup
self.eta_min = eta_min
super(WarmUpCosineAnnealingLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
if self.last_epoch < self.T_warmup:
return [base_lr * self.last_epoch / self.T_warmup for base_lr in self.base_lrs]
else:
k = 1 + math.cos(math.pi * (self.last_epoch - self.T_warmup) / (self.T_max - self.T_warmup))
return [self.eta_min + (base_lr - self.eta_min) * k / 2 for base_lr in self.base_lrs]
class FakeLR(_LRScheduler):
def __init__(self, optimizer):
super(FakeLR, self).__init__(optimizer=optimizer)
def get_lr(self):
return self.base_lrs
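# A minimal usage sketch (not part of the original file): the learning rate ramps
# up linearly for T_warmup steps, then follows a cosine decay towards eta_min.
if __name__ == "__main__":
    import torch
    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=2e-3)
    scheduler = WarmUpCosineAnnealingLR(optimizer, T_max=100, T_warmup=10, eta_min=5e-5)
    for step in range(15):
        optimizer.step()
        scheduler.step()
        print(step, scheduler.get_last_lr()[0])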
| 955 | 33.142857 | 104 | py |
CLNet | CLNet-main/utils/init.py | import os
import random
import thop
import torch
from models import clnet
from utils import logger, line_seg
__all__ = ["init_device", "init_model"]
def init_device(seed=None, cpu=None, gpu=None, affinity=None):
# set the CPU affinity
if affinity is not None:
os.system(f'taskset -p {affinity} {os.getpid()}')
# Set the random seed
if seed is not None:
random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
# Set the GPU id you choose
if gpu is not None:
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
# Env setup
if not cpu and torch.cuda.is_available():
device = torch.device('cuda')
torch.backends.cudnn.benchmark = True
if seed is not None:
torch.cuda.manual_seed(seed)
pin_memory = True
logger.info("Running on GPU%d" % (gpu if gpu else 0))
else:
pin_memory = False
device = torch.device('cpu')
logger.info("Running on CPU")
return device, pin_memory
def init_model(args):
# Model loading
model = clnet(reduction=args.cr)
if args.pretrained is not None:
assert os.path.isfile(args.pretrained)
state_dict = torch.load(args.pretrained,
map_location=torch.device('cpu'))['state_dict']
        # strict=False tolerates missing/unexpected keys in the checkpoint
        model.load_state_dict(state_dict, strict=False)
logger.info("pretrained model loaded from {}".format(args.pretrained))
# Model flops and params counting
image = torch.randn([1, 2, 32, 32])
flops, params = thop.profile(model, inputs=(image,), verbose=False)
flops, params = thop.clever_format([flops, params], "%.3f")
# Model info logging
logger.info(f'=> Model Name: CLNet [pretrained: {args.pretrained}]')
logger.info(f'=> Model Config: compression ratio=1/{args.cr}')
logger.info(f'=> Model Flops: {flops}')
logger.info(f'=> Model Params Num: {params}\n')
logger.info(f'{line_seg}\n{model}\n{line_seg}\n')
return model
| 2,102 | 29.926471 | 79 | py |
CLNet | CLNet-main/utils/__init__.py | from . import logger
from .logger import log_level, line_seg
from .init import *
from .scheduler import *
from .solver import *
| 130 | 15.375 | 39 | py |
CLNet | CLNet-main/utils/solver.py | import time
import os
import torch
from collections import namedtuple
from utils import logger
from utils.statics import AverageMeter, evaluator
__all__ = ['Trainer', 'Tester']
field = ('nmse', 'rho', 'epoch')
Result = namedtuple('Result', field, defaults=(None,) * len(field))
class Trainer:
r""" The training pipeline for encoder-decoder architecture
"""
def __init__(self, model, device, optimizer, criterion, scheduler, chk_name, resume=None,
save_path='./Experiments/chk', print_freq=20, val_freq=10, test_freq=10):
# Basic arguments
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.scheduler = scheduler
self.device = device
# Verbose arguments
self.resume_file = resume
self.save_path = save_path
self.print_freq = print_freq
self.val_freq = val_freq
self.test_freq = test_freq
# Pipeline arguments
self.cur_epoch = 1
self.all_epoch = None
self.train_loss = None
self.val_loss = None
self.test_loss = None
self.best_rho = Result()
self.best_nmse = Result()
self.tester = Tester(model, device, criterion, print_freq)
self.test_loader = None
self.chk_name = chk_name
def loop(self, epochs, train_loader, val_loader, test_loader):
r""" The main loop function which runs training and validation iteratively.
Args:
epochs (int): The total epoch for training
train_loader (DataLoader): Data loader for training data.
val_loader (DataLoader): Data loader for validation data.
test_loader (DataLoader): Data loader for test data.
"""
self.all_epoch = epochs
self._resume()
for ep in range(self.cur_epoch, epochs + 1):
self.cur_epoch = ep
# conduct training, validation and test
self.train_loss = self.train(train_loader)
if ep % self.val_freq == 0:
self.val_loss = self.val(val_loader)
if ep % self.test_freq == 0:
self.test_loss, rho, nmse = self.test(test_loader)
else:
rho, nmse = None, None
# conduct saving, visualization and log printing
self._loop_postprocessing(rho, nmse)
def train(self, train_loader):
r""" train the model on the given data loader for one epoch.
Args:
train_loader (DataLoader): the training data loader
"""
self.model.train()
with torch.enable_grad():
return self._iteration(train_loader)
def val(self, val_loader):
r""" exam the model with validation set.
Args:
val_loader: (DataLoader): the validation data loader
"""
self.model.eval()
with torch.no_grad():
return self._iteration(val_loader)
def test(self, test_loader):
r""" Truly test the model on the test dataset for one epoch.
Args:
test_loader (DataLoader): the test data loader
"""
self.model.eval()
with torch.no_grad():
return self.tester(test_loader, verbose=False)
def _iteration(self, data_loader):
iter_loss = AverageMeter('Iter loss')
iter_time = AverageMeter('Iter time')
time_tmp = time.time()
for batch_idx, (sparse_gt, ) in enumerate(data_loader):
sparse_gt = sparse_gt.to(self.device)
sparse_pred = self.model(sparse_gt)
loss = self.criterion(sparse_pred, sparse_gt)
# Scheduler update, backward pass and optimization
if self.model.training:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.scheduler.step()
# Log and visdom update
iter_loss.update(loss)
iter_time.update(time.time() - time_tmp)
time_tmp = time.time()
# plot progress
if (batch_idx + 1) % self.print_freq == 0:
logger.info(f'Epoch: [{self.cur_epoch}/{self.all_epoch}]'
f'[{batch_idx + 1}/{len(data_loader)}] '
f'lr: {self.scheduler.get_lr()[0]:.2e} | '
f'MSE loss: {iter_loss.avg:.3e} | '
f'time: {iter_time.avg:.3f}')
mode = 'Train' if self.model.training else 'Val'
logger.info(f'=> {mode} Loss: {iter_loss.avg:.3e}\n')
return iter_loss.avg
def _save(self, state, name):
if self.save_path is None:
logger.warning('No path to save checkpoints.')
return
os.makedirs(self.save_path, exist_ok=True)
torch.save(state, os.path.join(self.save_path, name))
def _resume(self):
r""" protected function which resume from checkpoint at the beginning of training.
"""
if self.resume_file is None:
return None
assert os.path.isfile(self.resume_file)
logger.info(f'=> loading checkpoint {self.resume_file}')
checkpoint = torch.load(self.resume_file)
self.cur_epoch = checkpoint['epoch']
self.model.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.scheduler.load_state_dict(checkpoint['scheduler'])
self.best_rho = checkpoint['best_rho']
self.best_nmse = checkpoint['best_nmse']
self.cur_epoch += 1 # start from the next epoch
logger.info(f'=> successfully loaded checkpoint {self.resume_file} '
f'from epoch {checkpoint["epoch"]}.\n')
def _loop_postprocessing(self, rho, nmse):
r""" private function which makes loop() function neater.
"""
# save state generate
state = {
'epoch': self.cur_epoch,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
'best_rho': self.best_rho,
'best_nmse': self.best_nmse
}
# save model with best rho and nmse
if rho is not None:
if self.best_rho.rho is None or self.best_rho.rho < rho:
self.best_rho = Result(rho=rho, nmse=nmse, epoch=self.cur_epoch)
state['best_rho'] = self.best_rho
self._save(state, name=f"best_rho_{self.chk_name}.pth")
if self.best_nmse.nmse is None or self.best_nmse.nmse > nmse:
self.best_nmse = Result(rho=rho, nmse=nmse, epoch=self.cur_epoch)
state['best_nmse'] = self.best_nmse
self._save(state, name=f"best_nmse_{self.chk_name}.pth")
self._save(state, name=f'last_{self.chk_name}.pth')
# print current best results
if self.best_rho.rho is not None:
print(f'\n=! Best rho: {self.best_rho.rho:.3e} ('
f'Corresponding nmse={self.best_rho.nmse:.3e}; '
f'epoch={self.best_rho.epoch})'
f'\n Best NMSE: {self.best_nmse.nmse:.3e} ('
f'Corresponding rho={self.best_nmse.rho:.3e}; '
f'epoch={self.best_nmse.epoch})\n')
class Tester:
r""" The testing interface for classification
"""
def __init__(self, model, device, criterion, print_freq=20):
self.model = model
self.device = device
self.criterion = criterion
self.print_freq = print_freq
def __call__(self, test_data, verbose=True):
r""" Runs the testing procedure.
Args:
test_data (DataLoader): Data loader for validation data.
"""
self.model.eval()
with torch.no_grad():
loss, rho, nmse = self._iteration(test_data)
if verbose:
print(f'\n=> Test result: \nloss: {loss:.3e}'
f' rho: {rho:.3e} NMSE: {nmse:.3e}\n')
return loss, rho, nmse
def _iteration(self, data_loader):
r""" protected function which test the model on given data loader for one epoch.
"""
iter_rho = AverageMeter('Iter rho')
iter_nmse = AverageMeter('Iter nmse')
iter_loss = AverageMeter('Iter loss')
iter_time = AverageMeter('Iter time')
time_tmp = time.time()
for batch_idx, (sparse_gt, raw_gt) in enumerate(data_loader):
sparse_gt = sparse_gt.to(self.device)
sparse_pred = self.model(sparse_gt)
loss = self.criterion(sparse_pred, sparse_gt)
rho, nmse = evaluator(sparse_pred, sparse_gt, raw_gt)
# Log and visdom update
iter_loss.update(loss)
iter_rho.update(rho)
iter_nmse.update(nmse)
iter_time.update(time.time() - time_tmp)
time_tmp = time.time()
# plot progress
if (batch_idx + 1) % self.print_freq == 0:
logger.info(f'[{batch_idx + 1}/{len(data_loader)}] '
f'loss: {iter_loss.avg:.3e} | rho: {iter_rho.avg:.3e} | '
f'NMSE: {iter_nmse.avg:.3e} | time: {iter_time.avg:.3f}')
logger.info(f'=> Test rho:{iter_rho.avg:.3e} NMSE: {iter_nmse.avg:.3e}\n')
return iter_loss.avg, iter_rho.avg, iter_nmse.avg
| 9,472 | 34.215613 | 93 | py |
modir | modir-master/drivers/run_warmup.py | import sys
sys.path += ["../"]
import pandas as pd
from transformers import glue_compute_metrics as compute_metrics, glue_output_modes as output_modes, glue_processors as processors
from transformers import (
AdamW,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
get_linear_schedule_with_warmup,
RobertaModel,
)
import transformers
from utils.eval_mrr import passage_dist_eval
from model.models import MSMarcoConfigDict
from model.domain_classifier import DomainClassifier, DummyModule
from utils.lamb import Lamb
import os
from os import listdir
from os.path import isfile, join
import argparse
import glob
import json
import logging
import random
import numpy as np
import torch
from tqdm import tqdm, trange
import torch.distributed as dist
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR
from torch import nn
from utils.util import getattr_recursive, set_seed, is_first_worker
from utils.modir_utils import (
compute_total_grad_L2_norm, intrain_dev_eval, intrain_save_checkpoint,
build_dl_iter_from_file, get_next,
build_input_from_batch, get_module
)
try:
from apex import amp
except ImportError:
print("apex not imported")
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
def train(args, model, dc_model, tokenizer, train_file, tgd_file, file_process_fn):
""" Train the model """
tb_writer = None
if is_first_worker():
tb_writer = SummaryWriter(log_dir=args.log_dir)
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
real_batch_size = args.train_batch_size * args.gradient_accumulation_steps * \
(torch.distributed.get_world_size() if args.local_rank != -1 else 1)
# Create a static copy of dc_model
static_dc_model = DomainClassifier(args)
static_dc_model.to(args.device)
if args.max_steps > 0:
t_total = args.max_steps
else:
t_total = args.expected_train_size // real_batch_size * args.num_train_epochs
# layerwise optimization for lamb
optimizer_grouped_parameters = []
layer_optim_params = set()
for layer_name in ["roberta.embeddings", "score_out", "downsample1", "downsample2", "downsample3", "embeddingHead"]:
layer = getattr_recursive(model, layer_name)
if layer is not None:
optimizer_grouped_parameters.append({"params": layer.parameters()})
for p in layer.parameters():
layer_optim_params.add(p)
if getattr_recursive(model, "roberta.encoder.layer") is not None:
for layer in model.roberta.encoder.layer:
optimizer_grouped_parameters.append({"params": layer.parameters()})
for p in layer.parameters():
layer_optim_params.add(p)
optimizer_grouped_parameters.append(
{"params": [p for p in model.parameters() if p not in layer_optim_params]})
optimizer_constructors = {
"lamb": lambda param, lr: Lamb(
param, lr=lr, eps=args.adam_epsilon
),
"adamw": lambda param, lr: AdamW(
param, lr=lr, eps=args.adam_epsilon
),
"sgd": lambda param, lr: SGD(
param, lr=lr,
)
}
optimizer = optimizer_constructors[args.optimizer.lower()](
optimizer_grouped_parameters, args.learning_rate)
dc_optimizer = optimizer_constructors[args.dc_optimizer.lower()](
dc_model.parameters(), args.dc_learning_rate)
if args.scheduler.lower() == "linear":
print('Total steps', t_total)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
dc_scheduler = get_linear_schedule_with_warmup(
dc_optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
elif args.scheduler.lower() == "cosine":
scheduler = CosineAnnealingLR(optimizer, t_total, 1e-8)
dc_scheduler = CosineAnnealingLR(dc_optimizer, t_total, 1e-8)
elif args.scheduler.lower() == "step":
# reduce learning rate by a half every 50k steps
scheduler = StepLR(optimizer, step_size=50000, gamma=0.5)
dc_scheduler = StepLR(dc_optimizer, step_size=50000, gamma=0.5)
else:
raise Exception(
"Scheduler {0} not recognized!".format(args.scheduler))
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
) and args.load_optimizer_scheduler:
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(
os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(
os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
if 'apex' not in sys.modules:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
[model, dc_model, static_dc_model], [optimizer, dc_optimizer] = amp.initialize(
[model, dc_model, static_dc_model],
[optimizer, dc_optimizer],
opt_level=args.fp16_opt_level
)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
dc_model = torch.nn.DataParallel(dc_model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
dc_model = torch.nn.parallel.DistributedDataParallel(
dc_model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=False,
)
static_dc_model = torch.nn.parallel.DistributedDataParallel(
static_dc_model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d",
args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d",
args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
dyn_lamb = args.lamb # dynamic lamb, the lamb that's actually used
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to gobal_step of last saved checkpoint from model path
try:
global_step = int(
args.model_name_or_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (args.expected_train_size //
args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
args.expected_train_size // args.gradient_accumulation_steps)
logger.info(
" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(
" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch",
steps_trained_in_current_epoch)
except:
logger.info(" Start training from a pretrained model")
tr_loss = 0 # useless but just keep it
optim_monitors = [
'loss_adv_D', 'loss_adv_M', 'loss_ranking',
'dc_total_Q', 'dc_correct_Q', 'dc_total_P', 'dc_correct_P',
'dc_pre_softmax_logits_0', 'dc_pre_softmax_logits_1',
'dc_post_softmax_prob_0', 'dc_post_softmax_prob_1',
'embedding_norm',
]
optim_cumulator = {k: 0.0 for k in optim_monitors}
model_parts = ['roberta', 'projection']
model_parts_params = {
'roberta': [p for n, p in model.named_parameters() if 'embeddingHead' not in n],
'projection': [p for n, p in model.named_parameters() if 'embeddingHead' in n],
# 'domain_classifier': dc_model.parameters(),
}
grad_norm_cumulator = {k: 0.0 for k in model_parts}
grad_norm_cumulator.update({k+'-clipped': 0.0 for k in model_parts})
grad_norm_cumulator.update({
'domain_classifier': 0.0, 'domain_classifier-clipped': 0.0
})
model.zero_grad()
model.train()
dc_model.zero_grad()
dc_model.train()
tqdm_disable = True
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch",
disable=tqdm_disable or args.local_rank not in [-1, 0],
) # each iter is 1 epoch
set_seed(args) # Added here for reproductibility
accumulated_srd_embs = []
accumulated_tgd_embs = []
prev_dry_dc_state_dict = None
for m_epoch in train_iterator:
if is_first_worker():
tb_writer.add_scalar(
'epoch', m_epoch, global_step
)
# get srd and tgd batches
epoch_dataloader, _ = build_dl_iter_from_file(args, train_file, file_process_fn)
_, tgd_epoch_iter = build_dl_iter_from_file(args, tgd_file, file_process_fn)
for step, batch in tqdm(
enumerate(epoch_dataloader), desc="Iteration",
disable=tqdm_disable or args.local_rank not in [-1,0]
):
            model.train()  # re-enable train mode; in-train evaluation can leave the model in eval mode
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
# get srd batch and inputs
if step % args.gradient_accumulation_steps == 0:
global_step += 1
batch = tuple(t.to(args.device).long() for t in batch)
batch_size = batch[0].shape[0]
inputs = build_input_from_batch(args, batch, mode='full')
# get tgd batch and inputs
tgd_batch, tgd_epoch_iter = get_next(
tgd_epoch_iter, args, tgd_file, file_process_fn, batch_size)
tgd_batch = tuple(t.to(args.device).long() for t in tgd_batch)
tgd_query_inputs = build_input_from_batch(args, tgd_batch, mode='query')
if step % 2 == 0:
tgd_doc_inputs = build_input_from_batch(args, tgd_batch, mode='pos_doc')
else:
tgd_doc_inputs = build_input_from_batch(args, tgd_batch, mode='neg_doc')
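            # Each batch drives three phases of the adversarial update:
            #   (1) encode source (srd) and target (tgd) queries/passages;
            #   (2) update the domain classifier on detached embeddings (L_adv_D);
            #   (3) copy the classifier into static_dc_model and back-propagate the
            #       ranking loss plus the lamb-scaled adversarial loss (L_adv_M)
            #       into the encoder.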
##### 1. forward of the encoder model #####
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
outputs = model(**inputs, output_dc_emb=True)
else:
with model.no_sync():
outputs = model(**inputs, output_dc_emb=True)
ranking_loss = outputs[0] # ranking loss
if step % 2 == 0:
srd_embs = [outputs[1][0], outputs[1][1]]
else:
srd_embs = [outputs[1][0], outputs[1][2]]
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
tgd_query_emb = get_module(model).query_emb(**tgd_query_inputs)
tgd_doc_emb = get_module(model).body_emb(**tgd_doc_inputs)
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
tgd_query_emb = get_module(model).query_emb(**tgd_query_inputs)
tgd_doc_emb = get_module(model).body_emb(**tgd_doc_inputs)
tgd_embs = [tgd_query_emb, tgd_doc_emb]
            detached_srd_embs = [x.detach().clone() for x in srd_embs]
            detached_tgd_embs = [x.detach().clone() for x in tgd_embs]
if args.dc_rep_method == 'async':
if len(accumulated_srd_embs) == args.dc_rep_steps:
accumulated_srd_embs.pop(0)
accumulated_tgd_embs.pop(0)
accumulated_srd_embs.append(detached_srd_embs)
accumulated_tgd_embs.append(detached_tgd_embs)
for emb in srd_embs+tgd_embs:
optim_cumulator['embedding_norm'] += emb.norm(dim=1).mean() / 4
if args.n_gpu > 1:
ranking_loss = ranking_loss.mean()
if args.gradient_accumulation_steps > 1:
ranking_loss = ranking_loss / args.gradient_accumulation_steps
optim_cumulator['loss_ranking'] += ranking_loss.item()
# 2. feed detached embeddings to the dc_model and BP L_adv_D
for dc_rep_step in range(1+args.dc_rep_steps):
if args.dc_rep_method == 'repeat':
srd_dc_input_embs = detached_srd_embs
tgd_dc_input_embs = detached_tgd_embs
elif args.dc_rep_method == 'async':
which_step = min(dc_rep_step, len(accumulated_srd_embs)-1)
srd_dc_input_embs = accumulated_srd_embs[which_step]
tgd_dc_input_embs = accumulated_tgd_embs[which_step]
if dc_rep_step == 0:
batched_srd_dc_input_embs = srd_dc_input_embs
batched_tgd_dc_input_embs = tgd_dc_input_embs
elif dc_rep_step % args.dc_rep_step_per_batch != 0:
batched_srd_dc_input_embs[0].append(srd_dc_input_embs[0])
batched_srd_dc_input_embs[1].append(srd_dc_input_embs[1])
batched_tgd_dc_input_embs[0].append(tgd_dc_input_embs[0])
batched_tgd_dc_input_embs[1].append(tgd_dc_input_embs[1])
continue
else:
batched_srd_dc_input_embs[0].append(srd_dc_input_embs[0])
batched_srd_dc_input_embs[1].append(srd_dc_input_embs[1])
batched_tgd_dc_input_embs[0].append(tgd_dc_input_embs[0])
batched_tgd_dc_input_embs[1].append(tgd_dc_input_embs[1])
batched_srd_dc_input_embs[0] = torch.cat(batched_srd_dc_input_embs[0])
batched_srd_dc_input_embs[1] = torch.cat(batched_srd_dc_input_embs[1])
batched_tgd_dc_input_embs[0] = torch.cat(batched_tgd_dc_input_embs[0])
batched_tgd_dc_input_embs[1] = torch.cat(batched_tgd_dc_input_embs[1])
# 2.1 feed detached embeddings to the dc_model
L_adv_D = 0.0
label_size = batch_size * (1 if dc_rep_step==0 else args.dc_rep_step_per_batch)
srd_labels = torch.tensor([0] * label_size, device=args.device)
tgd_labels = torch.tensor([1] * label_size, device=args.device)
for i_emb, emb in enumerate(batched_srd_dc_input_embs):
labels = srd_labels
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
dc_srd_outputs = dc_model(emb, labels=labels)
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
dc_srd_outputs = dc_model(emb, labels=labels)
L_adv_D += dc_srd_outputs[1] * args.dc_rep_step_per_batch # scale up because of the average in cross_entropy
if dc_rep_step == 0:
suffix = 'Q' if i_emb==0 else 'P'
optim_cumulator[f'dc_total_{suffix}'] += dc_srd_outputs[2][0]
optim_cumulator[f'dc_correct_{suffix}'] += dc_srd_outputs[2][1]
optim_cumulator['dc_pre_softmax_logits_0'] += dc_srd_outputs[0][:, 0].mean() / 4
optim_cumulator['dc_pre_softmax_logits_1'] += dc_srd_outputs[0][:, 1].mean() / 4
probs = torch.softmax(dc_srd_outputs[0], dim=1)
optim_cumulator['dc_post_softmax_prob_0'] += probs[:, 0].mean() / 4
optim_cumulator['dc_post_softmax_prob_1'] += probs[:, 1].mean() / 4
for i_emb, emb in enumerate(batched_tgd_dc_input_embs):
labels = tgd_labels
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
dc_tgd_outputs = dc_model(emb, labels=labels)
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
dc_tgd_outputs = dc_model(emb, labels=labels)
L_adv_D += dc_tgd_outputs[1] * args.dc_rep_step_per_batch # scale up because of the average in cross_entropy
if dc_rep_step == 0:
suffix = 'Q' if i_emb==0 else 'P'
optim_cumulator[f'dc_total_{suffix}'] += dc_tgd_outputs[2][0]
optim_cumulator[f'dc_correct_{suffix}'] += dc_tgd_outputs[2][1]
optim_cumulator['dc_pre_softmax_logits_0'] += dc_tgd_outputs[0][:, 0].mean() / 4
optim_cumulator['dc_pre_softmax_logits_1'] += dc_tgd_outputs[0][:, 1].mean() / 4
probs = torch.softmax(dc_tgd_outputs[0], dim=1)
optim_cumulator['dc_post_softmax_prob_0'] += probs[:, 0].mean() / 4
optim_cumulator['dc_post_softmax_prob_1'] += probs[:, 1].mean() / 4
if dc_rep_step % args.dc_rep_step_per_batch == 0:
batched_srd_dc_input_embs = [[], []]
batched_tgd_dc_input_embs = [[], []]
if dc_rep_step == 0:
continue # this dc_rep_step is only for logging things for optim_cumulator
if args.n_gpu > 1:
L_adv_D = L_adv_D.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
L_adv_D = L_adv_D / args.gradient_accumulation_steps
optim_cumulator['loss_adv_D'] += L_adv_D.item() / args.dc_rep_steps
# 2.2 BP of L_adv_D; dc_optimizer update
if args.fp16:
with amp.scale_loss(L_adv_D, dc_optimizer) as scaled_loss:
scaled_loss.backward()
else:
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
L_adv_D.backward()
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
L_adv_D.backward()
if step % args.gradient_accumulation_steps == 0:
grad_norm_cumulator['domain_classifier'] += compute_total_grad_L2_norm(
dc_model.parameters()
) / args.dc_rep_steps
if not args.no_gn_clip:
if args.fp16:
torch.nn.utils.clip_grad_norm_(
amp.master_params(dc_optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(
dc_model.parameters(), args.max_grad_norm)
grad_norm_cumulator['domain_classifier-clipped'] += compute_total_grad_L2_norm(
dc_model.parameters()
) / args.dc_rep_steps
dc_optimizer.step()
dc_model.zero_grad()
if step % args.gradient_accumulation_steps == 0:
dc_scheduler.step() # this is outside of the dc_rep_step loop
# 3.1 copy the dc_model, feed (undetached) embeddings to it
get_module(static_dc_model).load_state_dict(get_module(dc_model).state_dict())
L_adv_M = 0.0
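            # Encoder-side labels follow the ADDA taxonomy: 'minimax' negates the
            # classifier loss on true domain labels; 'gan' relabels target
            # embeddings as source (and skips the source loop below); 'confusion'
            # pushes the classifier output towards a uniform distribution on both
            # domains.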
if args.dc_loss_choice == 'minimax':
srd_labels = torch.tensor([0] * batch_size, device=args.device)
tgd_labels = torch.tensor([1] * batch_size, device=args.device)
elif args.dc_loss_choice == 'gan':
tgd_labels = torch.tensor([0] * batch_size, device=args.device)
elif args.dc_loss_choice == 'confusion':
srd_labels = 'uniform'
tgd_labels = 'uniform'
else:
raise NotImplementedError()
if args.dc_loss_choice != 'gan':
for emb in srd_embs:
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
dc_srd_outputs = static_dc_model(emb, labels=srd_labels)
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
dc_srd_outputs = static_dc_model(emb, labels=srd_labels)
L_adv_M += dc_srd_outputs[1]
for emb in tgd_embs:
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
dc_tgd_outputs = static_dc_model(emb, labels=tgd_labels)
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
dc_tgd_outputs = static_dc_model(emb, labels=tgd_labels)
L_adv_M += dc_tgd_outputs[1]
if args.dc_loss_choice == 'minimax':
L_adv_M = -L_adv_M
L_adv_M *= dyn_lamb
if args.n_gpu > 1:
L_adv_M = L_adv_M.mean()
if args.gradient_accumulation_steps > 1:
L_adv_M = L_adv_M / args.gradient_accumulation_steps
optim_cumulator['loss_adv_M'] += L_adv_M.item()
# 3.2 BP of ranking loss and L_adv_M; optimizer update
loss = ranking_loss + L_adv_M
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
loss.backward()
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
loss.backward()
if step % args.gradient_accumulation_steps == 0:
for model_part, params in model_parts_params.items():
grad_norm_cumulator[model_part] += compute_total_grad_L2_norm(params)
if not args.no_gn_clip:
if args.fp16:
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.max_grad_norm)
for model_part, params in model_parts_params.items():
grad_norm_cumulator[model_part+'-clipped'] += compute_total_grad_L2_norm(params)
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
# end of the main part of training
if step % args.gradient_accumulation_steps == 0:
if args.lamb_reduce_to_half_steps > 0:
if is_first_worker():
tb_writer.add_scalar("lambda", dyn_lamb, global_step)
dyn_lamb = args.lamb * 2**(-global_step / args.lamb_reduce_to_half_steps)
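                    # e.g. with lamb=0.5 and lamb_reduce_to_half_steps=10000,
                    # dyn_lamb decays exponentially and reaches 0.25 at step 10000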
if (args.logging_steps > 0 and global_step % args.logging_steps == 0):
logs = {}
logs["linear_layer_L2norm"] = get_module(dc_model).layers[0].weight.norm().item()
logs["linear_layer_mean"] = get_module(dc_model).layers[0].weight.mean().item()
logs["learning_rate"] = scheduler.get_last_lr()[0]
logs["learning_rate_dc"] = dc_optimizer.param_groups[0]['lr']
logs["dc_acc_Q"] = optim_cumulator['dc_correct_Q'] / (1e-10 + optim_cumulator['dc_total_Q'])
logs["dc_acc_P"] = optim_cumulator['dc_correct_P'] / (1e-10 + optim_cumulator['dc_total_P'])
for k in optim_monitors:
if k not in ['dc_total_Q', 'dc_correct_Q', 'dc_total_P', 'dc_correct_P']:
logs[k] = float(optim_cumulator[k] / args.logging_steps / args.gradient_accumulation_steps)
optim_cumulator = {k: 0.0 for k in optim_monitors} # reset
if is_first_worker():
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
logs.update({k: v/args.logging_steps for k, v in grad_norm_cumulator.items()})
logger.info(json.dumps({**logs, **{"step": global_step}}))
for key, value in grad_norm_cumulator.items():
tb_writer.add_scalar(
'grad_norm-'+key,
value / args.logging_steps,
global_step)
grad_norm_cumulator[key] = 0.0 # reset
if args.eval_steps > 0 and global_step % args.eval_steps == 0:
prev_dry_dc_state_dict = intrain_dev_eval(
args, global_step, model, tb_writer, prev_dry_dc_state_dict)
intrain_save_checkpoint(
args, global_step, model, tokenizer, optimizer, scheduler)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
tb_writer.close()
return global_step, tr_loss / global_step
def load_stuff(model_type, args):
    # Prepare task settings (arg names inherited from the GLUE examples)
args.task_name = args.task_name.lower()
args.output_mode = "classification"
label_list = ["0", "1"]
num_labels = len(label_list)
args.num_labels = num_labels
# store args
if args.local_rank != -1:
args.world_size = torch.distributed.get_world_size()
print('world_size', args.world_size)
else:
args.world_size = 1
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
configObj = MSMarcoConfigDict[model_type]
model_args = type('', (), {})()
model_args.use_mean = configObj.use_mean
config = configObj.config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=args.num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
config.output_hidden_states = True
tokenizer = configObj.tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = configObj.model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
model_argobj=model_args,
)
if args.local_rank == 0:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
model.to(args.device)
return config, tokenizer, model, configObj
def get_arguments():
parser = argparse.ArgumentParser()
# required arguments
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--tgd_data_name",
default=None,
type=str,
required=False,
help="The target domain dataset name; if there are multiple, separate with commas.",
)
parser.add_argument(
"--tgd_data_dir",
default=None,
type=str,
required=False,
help="The target domain input data dir; if there are multiple, separate with commas.",
)
parser.add_argument(
"--intraindev_data_dir",
default=None,
type=str,
required=False,
help="The dev set data dir; if there are multiple, separate with commas.",
)
parser.add_argument(
"--intraindev_data_name",
default=None,
type=str,
required=False,
help="The dev set dataset name; if there are multiple, separate with commas.",
)
parser.add_argument(
"--train_model_type",
default=None,
type=str,
required=True,
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--config_name",
default="",
type=str,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--saved_embedding_dir",
default="",
type=str,
help="The directory where intraindev embeddings are dumped",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_query_length",
default=64,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--do_train",
action="store_true",
help="Whether to run training.",
)
parser.add_argument(
"--do_eval",
action="store_true",
help="Whether to run eval on the dev set.",
)
parser.add_argument(
"--evaluate_during_training",
action="store_true",
help="Rul evaluation during training at each logging step.",
)
parser.add_argument(
"--do_lower_case",
action="store_true",
help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--log_dir",
default=None,
type=str,
help="Tensorboard log dir",
)
parser.add_argument(
"--eval_type",
default="full",
type=str,
help="MSMarco eval type - dev full or small",
)
parser.add_argument(
"--optimizer",
default="lamb",
type=str,
help="Optimizer - lamb or adamW or SGD",
)
parser.add_argument(
"--dc_optimizer",
default="lamb",
type=str,
help="Optimizer - lamb or adamW or SGD",
)
parser.add_argument(
"--scheduler",
default="linear",
type=str,
help="Scheduler - linear, cosine, or step",
)
parser.add_argument(
"--dc_loss_choice",
default="minimax",
type=str,
help="Adversarial loss choice (ADDA paper, Table 1, 4th column).")
parser.add_argument(
"--dc_layers",
default=1,
type=int,
help="How many layers to use for the domain classifier",
)
parser.add_argument(
"--per_gpu_train_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for the ranker model.",
)
parser.add_argument(
"--dc_learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for the domain classifier.",
)
parser.add_argument(
"--lamb",
default=0.0,
type=float,
help="HP for GAN loss.",
)
parser.add_argument(
"--lamb_reduce_to_half_steps",
default=0,
type=int,
help="Reduce dyn_lamb exponentially, and it will be reduced to a half after X steps.",
)
parser.add_argument(
"--dc_rep_steps",
default=1,
type=int,
help="Update dc_model over a single batch for X steps.",
)
parser.add_argument(
"--dc_rep_method",
default="repeat",
type=str,
help="Use what data for dc repetitive training. "
"repeat: use the same batch repetitively; "
"async: use embeddings recorded from previous batches."
)
parser.add_argument(
"--dc_rep_step_per_batch",
default=1,
type=int,
help="For dc_rep, how many steps of embeddings to put in one batch",
)
parser.add_argument(
"--no_gn_clip",
action="store_true",
help="Whether to disable grad norm clipping",
)
parser.add_argument(
"--weight_decay",
default=0.0,
type=float,
help="Weight decay if we apply some.",
)
parser.add_argument(
"--dropout_rate",
default=0.1,
type=float,
help="Dropout probability",
)
parser.add_argument(
"--adam_epsilon",
default=1e-8,
type=float,
help="Epsilon for Adam optimizer.",
)
parser.add_argument(
"--max_grad_norm",
default=1.0,
type=float,
help="Max gradient norm.",
)
parser.add_argument(
"--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument(
"--warmup_steps",
default=0,
type=int,
help="Linear warmup over warmup_steps.",
)
parser.add_argument(
"--logging_steps",
type=int,
default=500,
help="Log every X updates steps.",
)
parser.add_argument(
"--eval_steps",
type=int,
default=500,
help="Evaluate and save checkpoint every X global steps.",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument(
"--no_cuda",
action="store_true",
help="Avoid using CUDA when available",
)
parser.add_argument(
"--overwrite_output_dir",
action="store_true",
help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache",
action="store_true",
help="Overwrite the cached training and evaluation sets",
)
parser.add_argument(
"--seed",
type=int,
default=42,
help="random seed for initialization",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument(
"--expected_train_size",
default=100000,
type=int,
help="Expected train dataset size",
)
parser.add_argument(
"--load_optimizer_scheduler",
default=False,
action="store_true",
help="load scheduler from checkpoint or not",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
parser.add_argument(
"--server_ip",
type=str,
default="",
help="For distant debugging.",
)
parser.add_argument(
"--server_port",
type=str,
default="",
help="For distant debugging.",
)
args = parser.parse_args()
# sort intraindev datasets, so that tinymsmarco is the first and the target domain dataset is the second
args.intraindev_data_name = args.intraindev_data_name.split(',')
args.intraindev_data_dir = args.intraindev_data_dir.split(',')
assert args.intraindev_data_name[0] == 'tinymsmarco'
assert len(args.intraindev_data_name) >= 2
tgd_position = args.intraindev_data_name.index(args.tgd_data_name)
args.intraindev_data_name[1], args.intraindev_data_name[tgd_position] = args.intraindev_data_name[tgd_position], args.intraindev_data_name[1]
args.intraindev_data_dir[1], args.intraindev_data_dir[tgd_position] = args.intraindev_data_dir[tgd_position], args.intraindev_data_dir[1]
return args
def set_env(args):
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(
address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
def save_checkpoint(args, model, tokenizer):
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and is_first_worker():
# Create output directory if needed
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
if args.local_rank != -1:
dist.barrier()
def evaluation(args, model, tokenizer):
# Evaluation
results = {}
if args.do_eval:
model_dir = args.model_name_or_path if args.model_name_or_path else args.output_dir
checkpoints = [model_dir]
for checkpoint in checkpoints:
global_step = checkpoint.split(
"-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split(
"/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model.eval()
reranking_mrr, full_ranking_mrr = passage_dist_eval(
args, model, tokenizer)
if is_first_worker():
print(
"Reranking/Full ranking mrr: {0}/{1}".format(str(reranking_mrr), str(full_ranking_mrr)))
if args.local_rank != -1:
dist.barrier()
return results
def main():
args = get_arguments()
set_env(args)
config, tokenizer, model, configObj = load_stuff(
args.train_model_type, args)
dc_model = DomainClassifier(
args,
input_size=config.hidden_size,
n_class=2
)
dc_model.to(args.device)
# Training
if args.do_train:
logger.info("Training/evaluation parameters %s", args)
def file_process_fn(line, i):
return configObj.process_fn(line, i, tokenizer, args)
train_fname = args.data_dir+"/triples.train.small.tsv"
train_file = open(train_fname, encoding="utf-8-sig")
tgd_file = open(os.path.join(args.tgd_data_dir, "triples.simple.tsv"))
global_step, tr_loss = train(
args, model, dc_model, tokenizer, train_file, tgd_file, file_process_fn)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
train_file.close()
tgd_file.close()
save_checkpoint(args, model, tokenizer)
results = evaluation(args, model, tokenizer)
return results
if __name__ == "__main__":
main()
| 44,416 | 36.045038 | 145 | py |
modir | modir-master/drivers/run_ann_data_gen.py | import sys
sys.path += ['../']
import torch
import os
from collections import defaultdict
import faiss
from utils.util import (
barrier_array_merge,
convert_to_string_id,
is_first_worker,
StreamingDataset,
EmbeddingCache,
get_checkpoint_no,
get_latest_ann_data
)
import csv
import copy
import transformers
from transformers import (
AdamW,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
get_linear_schedule_with_warmup,
RobertaModel,
)
from data.msmarco_data import GetProcessingFn
from model.models import MSMarcoConfigDict, ALL_MODELS
from torch import nn
import torch.distributed as dist
from tqdm import tqdm, trange
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
import numpy as np
from os.path import isfile, join
import argparse
import json
import logging
import random
import time
import pytrec_eval
torch.multiprocessing.set_sharing_strategy('file_system')
logger = logging.getLogger(__name__)
# ANN - active learning ------------------------------------------------------
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
def get_latest_checkpoint(args):
if not os.path.exists(args.training_dir):
return args.init_model_dir, 0
subdirectories = list(next(os.walk(args.training_dir))[1])
def valid_checkpoint(checkpoint):
chk_path = os.path.join(args.training_dir, checkpoint)
scheduler_path = os.path.join(chk_path, "scheduler.pt")
return os.path.exists(scheduler_path)
checkpoint_nums = [get_checkpoint_no(s) for s in subdirectories if valid_checkpoint(s)]
if args.fix_refresh_rate > 0:
checkpoint_nums = [x for x in checkpoint_nums if x % args.fix_refresh_rate == 0]
if len(checkpoint_nums) > 0:
return os.path.join(args.training_dir, "checkpoint-" +
str(max(checkpoint_nums))) + "/", max(checkpoint_nums)
return args.init_model_dir, 0
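# Illustrative behavior of get_latest_checkpoint: if training_dir contains
# checkpoint-1000/ and checkpoint-2000/ (each holding a scheduler.pt), it returns
# (".../checkpoint-2000/", 2000); with no valid checkpoint it falls back to
# (args.init_model_dir, 0). Checkpoint numbers here are illustrative.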
def load_positive_ids(data_path, dev_set=False):
logger.info(f"Loading query_2_pos_docid from {data_path}")
query_positive_id = {}
query_positive_id_path = os.path.join(
data_path,
"dev-qrel.tsv" if dev_set else "train-qrel.tsv"
)
with open(query_positive_id_path, 'r', encoding='utf8') as f:
tsvreader = csv.reader(f, delimiter="\t")
for [topicid, docid, rel] in tsvreader:
topicid = int(topicid)
docid = int(docid)
if not dev_set:
assert rel == "1"
query_positive_id[topicid] = docid
else:
if topicid not in query_positive_id:
query_positive_id[topicid] = {}
query_positive_id[topicid][docid] = max(0, int(rel))
return query_positive_id
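# Illustrative qrel line format (tab-separated): "<topicid>\t<docid>\t<rel>",
# e.g. "42\t1337\t1" (ids illustrative). Train qrels must have rel == "1";
# dev qrels may carry graded relevance, clipped below at 0.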
def load_model(args, checkpoint_path):
label_list = ["0", "1"]
num_labels = len(label_list)
args.model_type = args.model_type.lower()
configObj = MSMarcoConfigDict[args.model_type]
args.model_name_or_path = checkpoint_path
config = configObj.config_class.from_pretrained(
args.model_name_or_path,
num_labels=num_labels,
finetuning_task="MSMarco",
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = configObj.tokenizer_class.from_pretrained(
args.model_name_or_path,
do_lower_case=True,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = configObj.model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model.to(args.device)
logger.info("Inference parameters %s", args)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[
args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
return config, tokenizer, model
def InferenceEmbeddingFromStreamDataLoader(
args,
model,
train_dataloader,
is_query_inference=True,
prefix=""):
# expect dataset from ReconstructTrainingSet
results = {}
eval_batch_size = args.per_gpu_eval_batch_size
# Inference!
logger.info("***** Running ANN Embedding Inference *****")
logger.info(" Batch size = %d", eval_batch_size)
embedding = []
embedding2id = []
if args.local_rank != -1:
dist.barrier()
model.eval()
for idx, batch in enumerate(tqdm(train_dataloader,
desc="Inferencing",
disable=args.local_rank not in [-1,0],
position=0,
leave=True)):
idxs = batch[3].detach().numpy() # [#B]
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0].long(),
"attention_mask": batch[1].long()}
if is_query_inference:
if args.world_size == 1:
embs = model.query_emb(**inputs)
else:
embs = model.module.query_emb(**inputs)
else:
if args.world_size == 1:
embs = model.body_emb(**inputs)
else:
embs = model.module.body_emb(**inputs)
embs = embs.detach().cpu().numpy()
# check for multi chunk output for long sequence
if len(embs.shape) == 3:
for chunk_no in range(embs.shape[1]):
embedding2id.append(idxs)
embedding.append(embs[:, chunk_no, :])
else:
embedding2id.append(idxs)
embedding.append(embs)
embedding = np.concatenate(embedding, axis=0)
embedding2id = np.concatenate(embedding2id, axis=0)
return embedding, embedding2id
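# Returned shapes: embedding is [num_examples (times num_chunks for long documents),
# hidden_dim]; embedding2id is the parallel 1-D array of example ids in the same order.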
# streaming inference
def StreamInferenceDoc(args, model, fn, prefix, f, output_path, is_query_inference=True):
inference_batch_size = args.per_gpu_eval_batch_size # * max(1, args.n_gpu)
inference_dataset = StreamingDataset(f, fn)
inference_dataloader = DataLoader(
inference_dataset,
batch_size=inference_batch_size)
if args.local_rank != -1:
dist.barrier() # directory created
_embedding, _embedding2id = InferenceEmbeddingFromStreamDataLoader(
args, model, inference_dataloader, is_query_inference=is_query_inference, prefix=prefix)
logger.info("merging embeddings")
# preserve to memory
full_embedding = barrier_array_merge(
args,
_embedding,
prefix=prefix + "_emb_p_",
output_path=output_path,
load_cache=False,
only_load_in_master=True)
full_embedding2id = barrier_array_merge(
args,
_embedding2id,
prefix=prefix + "_embid_p_",
output_path=output_path,
load_cache=False,
only_load_in_master=True)
return full_embedding, full_embedding2id
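# Note: since barrier_array_merge is called with only_load_in_master=True, the merged
# full_embedding / full_embedding2id are presumably materialized only on the master
# rank; other ranks should not rely on these return values.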
def generate_new_ann(
args,
output_num,
checkpoint_path,
srd_query_positive_id,
srd_dev_query_positive_id,
tgd_query_positive_id,
latest_step_num):
config, tokenizer, model = load_model(args, checkpoint_path)
logger.info("***** inference of srd dev query *****")
srd_dev_query_collection_path = os.path.join(args.srd_data_dir, "dev-query")
srd_dev_query_cache = EmbeddingCache(srd_dev_query_collection_path)
with srd_dev_query_cache as emb:
srd_dev_query_embedding, srd_dev_query_embedding2id = StreamInferenceDoc(
args,
model,
GetProcessingFn(args, query=True),
"dev_query_" + str(latest_step_num) + "_",
emb,
output_path=args.output_dir,
is_query_inference=True
)
logger.info("***** inference of srd passages *****")
srd_passage_collection_path = os.path.join(args.srd_data_dir, "passages")
srd_passage_cache = EmbeddingCache(srd_passage_collection_path)
with srd_passage_cache as emb:
srd_passage_embedding, srd_passage_embedding2id = StreamInferenceDoc(
args,
model,
GetProcessingFn(args, query=False),
"passage_" + str(latest_step_num) + "_",
emb,
output_path=args.output_dir,
is_query_inference=False
)
if args.inference:
return
logger.info("***** inference of srd train query *****")
srd_query_collection_path = os.path.join(args.srd_data_dir, "train-query")
srd_query_cache = EmbeddingCache(srd_query_collection_path)
with srd_query_cache as emb:
srd_query_embedding, srd_query_embedding2id = StreamInferenceDoc(
args,
model,
GetProcessingFn(args, query=True),
"query_" + str(latest_step_num) + "_",
emb,
output_path=args.output_dir,
is_query_inference=True
)
if is_first_worker():
# ANN search for dev passages and dev queries
srd_dim = srd_passage_embedding.shape[1]
print('srd passage embedding shape: ' + str(srd_passage_embedding.shape))
faiss.omp_set_num_threads(16)
srd_cpu_index = faiss.IndexFlatIP(srd_dim)
srd_cpu_index.add(srd_passage_embedding)
logger.info("***** Done Dev ANN Index *****")
_, srd_I = srd_cpu_index.search(srd_dev_query_embedding, 100) # I: [number of queries, topk]
result_dict, num_queries_srd = EvalDevQuery(
args, srd_dev_query_embedding2id, srd_passage_embedding2id,
srd_dev_query_positive_id, srd_I)
result_dict_with_srd_name = {}
for k, v in result_dict.items():
result_dict_with_srd_name['msmarco-'+k] = v
dump_eval_result(result_dict_with_srd_name, args.output_dir, output_num, checkpoint_path)
if args.tgd_data_dir is not None:
logger.info("***** inference of tgd passages *****")
tgd_passage_collection_path = os.path.join(args.tgd_data_dir, "passages")
tgd_passage_cache = EmbeddingCache(tgd_passage_collection_path)
with tgd_passage_cache as emb:
tgd_passage_embedding, tgd_passage_embedding2id = StreamInferenceDoc(
args,
model,
GetProcessingFn(args, query=False),
"passage_" + str(latest_step_num) + "_",
emb,
output_path=args.tgd_output_dir,
is_query_inference=False
)
logger.info("***** inference of tgd query *****")
tgd_query_collection_path = os.path.join(args.tgd_data_dir, "train-query")
tgd_query_cache = EmbeddingCache(tgd_query_collection_path)
with tgd_query_cache as emb:
tgd_query_embedding, tgd_query_embedding2id = StreamInferenceDoc(
args,
model,
GetProcessingFn(args, query=True, tgd=True),
"query_" + str(latest_step_num) + "_",
emb,
output_path=args.tgd_output_dir,
is_query_inference=True
)
if is_first_worker():
if args.tgd_data_dir is not None:
construct_new_train_set(
args,
tgd_passage_embedding, tgd_passage_embedding2id,
tgd_query_embedding, tgd_query_embedding2id,
tgd_query_positive_id,
output_num,
checkpoint_path,
output_path=args.tgd_output_dir
)
# the ranking training set: (query, pos_doc, [nearest_neg_doc]*n)
construct_new_train_set(
args,
srd_passage_embedding, srd_passage_embedding2id,
srd_query_embedding, srd_query_embedding2id,
srd_query_positive_id,
output_num,
checkpoint_path,
output_path=args.output_dir
)
# return result_dict['ndcg@20'], num_queries_dev
def dump_eval_result(result_dict, output_path, output_num, checkpoint_path):
ndcg_output_path = os.path.join(
output_path, f"ann_ndcg_" + str(output_num))
if os.path.exists(ndcg_output_path):
with open(ndcg_output_path) as fin:
json_dict = json.load(fin)
else:
json_dict = {}
json_dict.update(result_dict)
with open(ndcg_output_path, 'w') as f:
json_dict['checkpoint'] = checkpoint_path
json.dump(json_dict, f)
def construct_new_nngan_train_set(
args,
srd_query_embedding, srd_query_embedding2id,
srd_passage_embedding, srd_passage_embedding2id,
tgd_passage_embedding, tgd_passage_embedding2id,
output_num,
checkpoint_path,
output_path,
max_size,
):
# the domain adaptation training set:
# THIS ONE NOT USED NOW: (srd_query, [nearest_srd_doc]*n, [nearest_tgd_doc]*n)
# (srd_doc, [nearest_srd_doc]*n, [nearest_tgd_doc]*n)
# (tgd_doc, [nearest_tgd_doc]*n, [nearest_srd_doc]*n)
dim = srd_query_embedding.shape[1]
faiss.omp_set_num_threads(16)
srd_passage_index = faiss.IndexFlatIP(dim)
srd_passage_index.add(srd_passage_embedding)
tgd_passage_index = faiss.IndexFlatIP(dim)
tgd_passage_index.add(tgd_passage_embedding)
logger.info("***** Done srd & tgd passage index *****")
    chunk_factor = args.ann_chunk_factor
    if chunk_factor <= 0:
        chunk_factor = 1
    effective_idx = output_num % chunk_factor  # safe now that chunk_factor >= 1
search_and_build_dataset(
args,
chunk_factor=chunk_factor, effective_idx=effective_idx,
pos_index=srd_passage_index, pos_index2id=srd_passage_embedding2id,
neg_index=tgd_passage_index, neg_index2id=tgd_passage_embedding2id,
query_embedding=srd_passage_embedding, query_embedding2id=srd_passage_embedding2id,
output_fname = os.path.join(output_path, f"sd_sd_td_{output_num}"),
max_size=max_size
)
search_and_build_dataset(
args,
chunk_factor=1, effective_idx=0,
pos_index=tgd_passage_index, pos_index2id=tgd_passage_embedding2id,
neg_index=srd_passage_index, neg_index2id=srd_passage_embedding2id,
query_embedding=tgd_passage_embedding, query_embedding2id=tgd_passage_embedding2id,
output_fname = os.path.join(output_path, f"td_td_sd_{output_num}"),
max_size=int(1e10)
)
def search_and_build_dataset(
args,
chunk_factor, effective_idx,
pos_index, pos_index2id,
neg_index, neg_index2id,
query_embedding, query_embedding2id,
output_fname,
max_size,
):
num_queries = len(query_embedding)
queries_per_chunk = num_queries // chunk_factor
q_start_idx = queries_per_chunk * effective_idx
if effective_idx == chunk_factor - 1:
q_end_idx = num_queries
else:
q_end_idx = q_start_idx + queries_per_chunk
q_end_idx = min(q_end_idx, q_start_idx+max_size)
query_embedding = query_embedding[q_start_idx:q_end_idx]
query_embedding2id = query_embedding2id[q_start_idx:q_end_idx]
effective_q_id = set(query_embedding2id.flatten())
_, pos_I = pos_index.search(query_embedding, args.nn_topk_training)
_, neg_I = neg_index.search(query_embedding, args.nn_posneg_sample)
with open(output_fname, 'w') as fout:
for query_idx in range(pos_I.shape[0]):
if query_idx % 5000 == 0:
logger.info(f"query_idx = {query_idx}")
query_id = query_embedding2id[query_idx]
if query_id not in effective_q_id:
continue
selected_pos_ann_idx = random.choices(
                pos_I[query_idx],  # optionally slice [1:] to exclude the anchor itself
k=args.nn_posneg_sample
)
selected_neg_ann_idx = neg_I[query_idx]
print("{}\t{}\t{}".format(
query_id,
','.join([str(pos_index2id[pid]) for pid in selected_pos_ann_idx]),
','.join([str(neg_index2id[pid]) for pid in selected_neg_ann_idx]),
), file=fout)
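# Each output line is "<anchor_id>\t<pos_id_1,...,pos_id_k>\t<neg_id_1,...,neg_id_k>",
# where positives are sampled from pos_index's top-k hits and negatives are the
# nearest neighbors retrieved from neg_index.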
def construct_new_train_set(
args,
passage_embedding, passage_embedding2id,
query_embedding, query_embedding2id,
training_query_positive_id,
output_num,
checkpoint_path,
output_path,
):
# ANN search for (train) passages and queries, output the new training set to files
dim = passage_embedding.shape[1]
print('passage embedding shape: ' + str(passage_embedding.shape))
faiss.omp_set_num_threads(16)
cpu_index = faiss.IndexFlatIP(dim)
cpu_index.add(passage_embedding)
logger.info("***** Done ANN Index *****")
# Construct new training set ==================================
    chunk_factor = args.ann_chunk_factor
    if chunk_factor <= 0:
        chunk_factor = 1
    effective_idx = output_num % chunk_factor  # safe now that chunk_factor >= 1
num_queries = len(query_embedding)
queries_per_chunk = num_queries // chunk_factor
q_start_idx = queries_per_chunk * effective_idx
    if effective_idx == chunk_factor - 1:
        q_end_idx = num_queries
    else:
        q_end_idx = q_start_idx + queries_per_chunk
query_embedding = query_embedding[q_start_idx:q_end_idx]
query_embedding2id = query_embedding2id[q_start_idx:q_end_idx]
logger.info(
"Chunked {} query from {}".format(
len(query_embedding),
num_queries))
# I: [number of queries, topk]
_, I = cpu_index.search(query_embedding, args.topk_training)
effective_q_id = set(query_embedding2id.flatten())
    query_negative_passage = GenerateNegativePassageID(
args,
query_embedding2id,
passage_embedding2id,
training_query_positive_id,
I,
effective_q_id)
logger.info("***** Construct ANN Triplet *****")
train_data_output_path = os.path.join(
output_path, f"ann_training_data_" + str(output_num))
with open(train_data_output_path, 'w') as f:
query_range = list(range(I.shape[0]))
random.shuffle(query_range)
for query_idx in query_range:
query_id = query_embedding2id[query_idx]
if query_id not in effective_q_id or query_id not in training_query_positive_id:
continue
pos_pid = training_query_positive_id[query_id]
f.write(
"{}\t{}\t{}\n".format(
query_id, pos_pid, ','.join(
str(neg_pid) for neg_pid in query_negative_passage[query_id])))
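# Each ann_training_data line is "<qid>\t<pos_pid>\t<neg_pid_1,...,neg_pid_n>",
# e.g. "42\t1337\t7,21,99" (ids illustrative).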
def GenerateNegativePassageID(
args,
query_embedding2id,
passage_embedding2id,
training_query_positive_id,
I_nearest_neighbor,
effective_q_id):
query_negative_passage = {}
SelectTopK = args.ann_measure_topk_mrr
    mrr = 0  # only meaningful if SelectTopK is True
num_queries = 0
for query_idx in range(I_nearest_neighbor.shape[0]):
query_id = query_embedding2id[query_idx]
if query_id not in effective_q_id:
continue
num_queries += 1
pos_pid = training_query_positive_id[query_id]
top_ann_pid = I_nearest_neighbor[query_idx, :].copy()
if SelectTopK:
selected_ann_idx = top_ann_pid[:args.negative_sample + 1]
else:
negative_sample_I_idx = list(range(I_nearest_neighbor.shape[1]))
random.shuffle(negative_sample_I_idx)
selected_ann_idx = top_ann_pid[negative_sample_I_idx]
query_negative_passage[query_id] = []
neg_cnt = 0
rank = 0
for idx in selected_ann_idx:
neg_pid = passage_embedding2id[idx]
rank += 1
if neg_pid == pos_pid:
if rank <= 10:
mrr += 1 / rank
continue
if neg_pid in query_negative_passage[query_id]:
continue
if neg_cnt >= args.negative_sample:
break
query_negative_passage[query_id].append(neg_pid)
neg_cnt += 1
if SelectTopK:
print("Rank:" + str(args.rank) +
" --- ANN MRR:" + str(mrr / num_queries))
return query_negative_passage
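# Note: when args.ann_measure_topk_mrr is set, negatives are taken from the
# top-ranked ANN hits and an MRR@10 over the training queries is printed as a
# side effect (a positive found within rank 10 contributes 1/rank).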
def EvalDevQuery(
args,
query_embedding2id,
passage_embedding2id,
dev_query_positive_id,
I_nearest_neighbor):
# [qid][docid] = docscore, here we use -rank as score, so the higher the rank (1 > 2), the higher the score (-1 > -2)
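    # e.g. ranks 1, 2, 3 become scores -1, -2, -3, so pytrec_eval's
    # score-descending sort reproduces the original ranking order.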
prediction = {}
for query_idx in range(I_nearest_neighbor.shape[0]):
query_id = query_embedding2id[query_idx]
prediction[query_id] = {}
top_ann_pid = I_nearest_neighbor[query_idx, :].copy()
selected_ann_idx = top_ann_pid[:50]
rank = 0
seen_pid = set()
for idx in selected_ann_idx:
pred_pid = passage_embedding2id[idx]
if pred_pid not in seen_pid:
# this check handles multiple vector per document
rank += 1
prediction[query_id][pred_pid] = -rank
seen_pid.add(pred_pid)
# use out of the box evaluation script
evaluator = pytrec_eval.RelevanceEvaluator(
convert_to_string_id(dev_query_positive_id),
{'map_cut', 'ndcg_cut', 'recip_rank','recall', 'P'}
)
eval_query_cnt = 0
result = evaluator.evaluate(convert_to_string_id(prediction))
ndcg = defaultdict(int)
precision = defaultdict(int)
Map = 0
mrr = 0
recall_1k = 0
recall_100 = 0
cuts = [5, 10, 20]
for k in result.keys():
eval_query_cnt += 1
for cut in cuts:
ndcg[cut] += result[k][f"ndcg_cut_{cut}"]
precision[cut] += result[k][f"P_{cut}"]
Map += result[k]["map_cut_10"]
mrr += result[k]["recip_rank"]
recall_1k += result[k]["recall_1000"]
recall_100 += result[k]["recall_100"]
result_dict = {}
for cut in cuts:
result_dict[f'ndcg@{cut}'] = ndcg[cut] / eval_query_cnt
result_dict[f'p@{cut}'] = precision[cut] / eval_query_cnt
result_dict['map'] = Map / eval_query_cnt
result_dict['mrr'] = mrr / eval_query_cnt
result_dict['recall_1k'] = recall_1k / eval_query_cnt
result_dict['recall_100'] = recall_100 / eval_query_cnt
print("Rank:" + str(args.rank) \
+ " --- ANN NDCG@20:" + str(result_dict['ndcg@20']) \
+ " --- ANN MRR:" + str(result_dict['mrr']) \
+ " --- ANN P@20:" + str(result_dict['p@20'])
)
return result_dict, eval_query_cnt
def get_arguments():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--srd_data_dir",
default=None,
type=str,
required=True,
help="The input data dir for source domain train set. "
"Should contain the .tsv files (or other data files) for the task. "
"For now it's msmarco.",
)
parser.add_argument(
"--tgd_data_dir",
default=None,
type=str,
required=False,
help="The input data dir for target domain train set. For now it's cqgtc.",
)
parser.add_argument(
"--training_dir",
default=None,
type=str,
required=True,
help="Training dir, will look for latest checkpoint dir in here",
)
parser.add_argument(
"--init_model_dir",
default=None,
type=str,
required=True,
help="Initial model dir, will use this if no checkpoint is found in training_dir",
)
parser.add_argument(
"--last_checkpoint_dir",
default="",
type=str,
help="Last checkpoint used, this is for rerunning this script when some ann data is already generated",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " +
", ".join(
MSMarcoConfigDict.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the training data will be written",
)
parser.add_argument(
"--tgd_output_dir",
default=None,
type=str,
required=False,
help="The output directory where the tgd data will be written",
)
parser.add_argument(
"--cache_dir",
default=None,
type=str,
required=True,
help="The directory where cached data will be written",
)
parser.add_argument(
"--end_output_num",
        default=-1,
        type=int,
        help="Stop after this number of data versions has been generated; by default, run forever",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence (document) length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_query_length",
default=64,
type=int,
help="The maximum total input query length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_query_length_tgd",
default=None,
type=int,
help="The maximum total input query length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Default is args.max_query_length. "
"This argument is used only when target domain is arguana, where max_query_len=512 is needed.",
)
parser.add_argument(
"--max_doc_character",
default=10000,
type=int,
help="used before tokenizer to save tokenizer latency",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=128,
type=int,
help="The starting output file number",
)
parser.add_argument(
"--ann_chunk_factor",
        default=5,  # for 500k queries, divided into 100k chunks for each epoch
type=int,
help="devide training queries into chunks",
)
parser.add_argument(
"--topk_training",
default=500,
type=int,
help="top k from which negative samples are collected",
)
parser.add_argument(
"--negative_sample",
default=5,
type=int,
help="at each resample, how many negative samples per query do I use",
)
parser.add_argument(
"--nn_topk_training",
default=50,
type=int,
help="top k from which negative samples are collected (for nn discriminator)",
)
parser.add_argument(
"--nn_posneg_sample",
default=5,
type=int,
help="at each resample, how many negative samples per query do I use",
)
parser.add_argument(
"--ann_measure_topk_mrr",
default=False,
action="store_true",
help="load scheduler from checkpoint or not",
)
parser.add_argument(
"--only_keep_latest_embedding_file",
default=False,
action="store_true",
help="load scheduler from checkpoint or not",
)
parser.add_argument(
"--fix_refresh_rate",
type=int,
default=0,
help="Fix the ANN index refresh rate to X global steps. If X is 0 then we don't fix it.",
)
parser.add_argument(
"--no_cuda",
action="store_true",
help="Avoid using CUDA when available",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
parser.add_argument(
"--server_ip",
type=str,
default="",
help="For distant debugging.",
)
parser.add_argument(
"--server_port",
type=str,
default="",
help="For distant debugging.",
)
parser.add_argument(
"--inference",
default=False,
action="store_true",
help="only do inference if specify",
)
args = parser.parse_args()
return args
def set_env(args):
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# store args
if args.local_rank != -1:
args.world_size = torch.distributed.get_world_size()
args.rank = dist.get_rank()
else:
args.world_size = 1
args.rank = 0
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
)
def ann_data_gen(args):
last_checkpoint = args.last_checkpoint_dir
ann_no, _, _ = get_latest_ann_data(args.output_dir) # train only, since we only care about ann_no
output_num = ann_no + 1
logger.info("starting output number %d", output_num)
if is_first_worker():
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if args.tgd_output_dir is not None:
if not os.path.exists(args.tgd_output_dir):
os.makedirs(args.tgd_output_dir)
if not os.path.exists(args.cache_dir):
os.makedirs(args.cache_dir)
srd_positive_id = load_positive_ids(args.srd_data_dir)
srd_dev_positive_id = load_positive_ids(args.srd_data_dir, dev_set=True)
tgd_positive_id = None
if args.tgd_data_dir is not None:
tgd_positive_id = load_positive_ids(args.tgd_data_dir)
while args.end_output_num == -1 or output_num <= args.end_output_num:
next_checkpoint, latest_step_num = get_latest_checkpoint(args)
if args.only_keep_latest_embedding_file:
latest_step_num = 0
if next_checkpoint == last_checkpoint:
time.sleep(60)
else:
logger.info("start generate ann data number %d", output_num)
logger.info("next checkpoint at " + next_checkpoint)
generate_new_ann( # for both train and tgd
args,
output_num,
next_checkpoint,
srd_positive_id,
srd_dev_positive_id,
tgd_positive_id,
latest_step_num)
if args.inference:
break
logger.info("finished generating ann data number %d", output_num)
output_num += 1
last_checkpoint = next_checkpoint
if args.local_rank != -1:
dist.barrier()
def main():
args = get_arguments()
set_env(args)
ann_data_gen(args)
if __name__ == "__main__":
main()
| 31,788 | 31.2077 | 121 | py |
modir | modir-master/drivers/run_ann.py | import sys
sys.path += ['../']
import os
import time
import torch
from data.msmarco_data import GetTrainingDataProcessingFn, GetTripletTrainingDataProcessingFn
from utils.util import (
getattr_recursive,
set_seed,
StreamingDataset,
EmbeddingCache,
get_checkpoint_no,
get_latest_ann_data,
is_first_worker
)
import pandas as pd
from transformers import glue_processors as processors
from transformers import (
AdamW,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
get_linear_schedule_with_warmup
)
import transformers
from utils.lamb import Lamb
from utils.modir_utils import (
compute_total_grad_L2_norm, intrain_dev_eval, intrain_save_checkpoint,
build_dl_iter_from_file, get_next,
build_input_from_batch, get_module
)
from data.msmarco_data import GetProcessingFn
from model.models import MSMarcoConfigDict, ALL_MODELS
from model.domain_classifier import DomainClassifier, DummyModule, dry_dc_evaluation
from torch import nn
import torch.distributed as dist
from tqdm import tqdm, trange
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
import numpy as np
from os.path import isfile, join
import argparse
import glob
import json
import logging
import random
import faiss
try:
from apex import amp
except ImportError:
print("apex not imported")
torch.multiprocessing.set_sharing_strategy('file_system')
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
# logging.disable(20)  # suppressing logger.info
faiss.omp_set_num_threads(16)
faiss_dim = 768 # it's unlikely that this will need to be changed
def GetTripletTrainingDataProcessingFnWithSeparatePassageCache(
args, query_cache, passage_cache, another_passage_cache
):
def fn(line, i):
line_arr = line.split('\t')
qid = int(line_arr[0])
for pos_pid, neg_pid in zip(
[int(pos_pid) for pos_pid in line_arr[1].split(',')],
[int(neg_pid) for neg_pid in line_arr[2].split(',')]
):
query_data = GetProcessingFn(
args, query=False)(
query_cache[qid], qid)[0]
pos_data = GetProcessingFn(
args, query=False)(
passage_cache[pos_pid], pos_pid)[0]
neg_data = GetProcessingFn(
args, query=False)(
another_passage_cache[neg_pid], neg_pid)[0]
yield (
query_data[0], query_data[1], query_data[2],
pos_data[0], pos_data[1], pos_data[2],
neg_data[0], neg_data[1], neg_data[2]
)
return fn
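# Expected input line format: "<qid>\t<pos_pid_1,...>\t<neg_pid_1,...>"; positive and
# negative ids are consumed pairwise, and all three fields are embedded as passages
# (note GetProcessingFn(args, query=False) is applied to the query as well).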
def build_train_dataset_from_ann(
args,
query_cache, passage_cache,
tb_writer,
global_step,
last_ann_no,
ann_dir,
):
# check if new ann training data is availabe
ann_no, ann_path, ndcg_json = get_latest_ann_data(ann_dir)
if ann_path is not None and ann_no != last_ann_no:
try:
logger.info("Training on new ANN data at %s", ann_path)
with open(ann_path, 'r') as f:
ann_training_data = f.readlines()
aligned_size = (len(ann_training_data) //
args.world_size) * args.world_size
ann_training_data = ann_training_data[:aligned_size]
logger.info("Total ann queries: %d", len(ann_training_data))
if args.triplet:
train_dataset = StreamingDataset(
ann_training_data,
GetTripletTrainingDataProcessingFn(
args, query_cache, passage_cache, tgd=not(ann_dir==args.ann_dir))
)
else:
train_dataset = StreamingDataset(
ann_training_data,
GetTrainingDataProcessingFn(
args, query_cache, passage_cache)
)
train_dataloader = DataLoader(
train_dataset, batch_size=args.train_batch_size)
if args.local_rank != -1:
dist.barrier()
update = True
except FileNotFoundError:
update = False
train_dataloader = None
if is_first_worker() and ann_dir==args.ann_dir:
# add ndcg at checkpoint step used instead of current step
# ndcg_json will not be None since this is args.ann_dir
metric_step = ndcg_json['checkpoint'].strip('/').split('/')[-1].split('-')[-1]
try:
metric_step = int(metric_step)
except ValueError:
metric_step = 0
for key in ndcg_json:
if key != 'checkpoint':
tb_writer.add_scalar(
key, ndcg_json[key], metric_step
)
last_ann_no = ann_no
return update, (train_dataloader, last_ann_no)
return False, (None, None)
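# Returns (update, (train_dataloader, last_ann_no)): update is True only when a new
# ann data file was successfully loaded; otherwise the caller keeps its current
# dataloader and ann number.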
def show(model):
# for debugging: print the first parameter of the model
for p in model.parameters():
entry = p
while True:
try:
entry = entry[0]
            except (IndexError, TypeError):  # drilled down to a 0-dim entry
                return entry.item()
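# Usage sketch (debugging only): logger.info(show(model)) prints the first scalar
# entry of the model's first parameter, a cheap fingerprint for checking whether
# the weights changed between steps.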
def train(args, model, dc_model, tokenizer,
caches, tgd_file_name, file_process_fn,
):
""" Train the model """
logger.info("Training/evaluation parameters %s", args)
tb_writer = None
if is_first_worker():
tb_writer = SummaryWriter(log_dir=args.log_dir)
query_cache, passage_cache = caches
tgd_file = open(tgd_file_name)
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
real_batch_size = args.train_batch_size * args.gradient_accumulation_steps * \
(torch.distributed.get_world_size() if args.local_rank != -1 else 1)
# Create a static copy of dc_model
static_dc_model = DomainClassifier(args)
static_dc_model.to(args.device)
# optimizer for ANCE
optimizer_grouped_parameters = []
layer_optim_params = set()
for layer_name in [
"roberta.embeddings",
"score_out",
"downsample1",
"downsample2",
"downsample3"]:
layer = getattr_recursive(model, layer_name)
if layer is not None:
optimizer_grouped_parameters.append({"params": layer.parameters()})
for p in layer.parameters():
layer_optim_params.add(p)
if getattr_recursive(model, "roberta.encoder.layer") is not None:
for layer in model.roberta.encoder.layer:
optimizer_grouped_parameters.append({"params": layer.parameters()})
for p in layer.parameters():
layer_optim_params.add(p)
optimizer_grouped_parameters.append(
{"params": [p for p in model.parameters() if p not in layer_optim_params]})
if args.optimizer.lower() == "lamb":
optimizer_constructor = lambda param, lr, decay: Lamb(
param, lr=lr, eps=args.adam_epsilon, weight_decay=decay
)
elif args.optimizer.lower() == "adamw":
optimizer_constructor = lambda param, lr, decay: AdamW(
param, lr=lr, eps=args.adam_epsilon, weight_decay=decay
)
else:
raise NotImplementedError(
f"Optimizer {args.optimizer} not recognized! Can only be lamb or adamW")
optimizer = optimizer_constructor(optimizer_grouped_parameters, args.learning_rate, args.weight_decay)
dc_optimizer = optimizer_constructor(dc_model.parameters(), args.dc_learning_rate, args.dc_weightDecay)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(
os.path.join(
args.model_name_or_path,
"optimizer.pt")) and args.load_optimizer_scheduler:
# Load in optimizer and scheduler states
optimizer.load_state_dict(
torch.load(
os.path.join(
args.model_name_or_path,
"optimizer.pt")))
logger.info("Start fp16 and distributed model init")
if args.fp16:
if 'apex' not in sys.modules:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
[model, dc_model, static_dc_model], [optimizer, dc_optimizer] = amp.initialize(
[model, dc_model, static_dc_model],
[optimizer, dc_optimizer],
opt_level=args.fp16_opt_level
)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
dc_model = torch.nn.DataParallel(dc_model)
static_dc_model = torch.nn.DataParallel(static_dc_model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
dc_model = torch.nn.parallel.DistributedDataParallel(
dc_model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
static_dc_model = torch.nn.parallel.DistributedDataParallel(
static_dc_model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train
logger.info("***** Running training *****")
logger.info(" Max steps = %d", args.max_steps)
logger.info(
" Instantaneous batch size per GPU = %d",
args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(
" Gradient Accumulation steps = %d",
args.gradient_accumulation_steps)
global_step = 0
dyn_lamb = args.lamb
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to gobal_step of last saved checkpoint from model
# path
if "-" in args.model_name_or_path:
try:
global_step = int(
args.model_name_or_path.split("-")[-1].split("/")[0])
            except ValueError:
                global_step = 0
else:
global_step = 0
logger.info(
" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from global step %d", global_step)
optim_monitors = [
'loss_adv_D', 'loss_adv_M', 'loss_ranking',
'dc_total_Q', 'dc_correct_Q', 'dc_total_P', 'dc_correct_P',
'dc_pre_softmax_logits_0', 'dc_pre_softmax_logits_1',
'dc_post_softmax_prob_0', 'dc_post_softmax_prob_1',
'embedding_norm',
]
optim_cumulator = {k: 0.0 for k in optim_monitors}
model_parts = ['roberta', 'projection']
model_parts_params = {
'roberta': [p for n, p in model.named_parameters() if 'embeddingHead' not in n],
'projection': [p for n, p in model.named_parameters() if 'embeddingHead' in n],
# 'domain_classifier': dc_model.parameters(),
}
grad_norm_cumulator = {k: 0.0 for k in model_parts}
grad_norm_cumulator.update({k+'-clipped': 0.0 for k in model_parts})
grad_norm_cumulator.update({
'domain_classifier': 0.0, 'domain_classifier-clipped': 0.0
})
model.zero_grad()
model.train()
dc_model.zero_grad()
dc_model.train()
    set_seed(args)  # Added here for reproducibility
last_ann_no = -1
train_dataloader = None
train_dataloader_iter = None
epoch_num = -1
step = 0
accumulated_srd_embs = []
accumulated_tgd_embs = []
prev_dry_dc_state_dict = None
# actual_refresh_rate = None
# prev_refresh_gstep = None
# half_eval_done = False
if args.single_warmup:
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=args.max_steps)
dc_scheduler = get_linear_schedule_with_warmup(
dc_optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=args.max_steps)
srd_update_ann, tgd_update_ann = False, False
while global_step < args.max_steps:
if step % args.gradient_accumulation_steps == 0:
if global_step % args.logging_steps == 0:
if not srd_update_ann:
srd_update_ann, (newdataloader, newlan) = build_train_dataset_from_ann(
args,
query_cache, passage_cache,
tb_writer,
global_step,
last_ann_no,
ann_dir=args.ann_dir
)
                # update once the new srd ann data is ready (tgd training data is streamed from tgd_file instead)
if srd_update_ann:
train_dataloader, last_ann_no = newdataloader, newlan
train_dataloader_iter = iter(train_dataloader)
epoch_num += 1
if is_first_worker():
tb_writer.add_scalar(
'epoch', epoch_num, global_step
)
if global_step > 0:
prev_dry_dc_state_dict = intrain_dev_eval(
args, global_step, model, tb_writer, prev_dry_dc_state_dict,
all_datasets=True)
intrain_save_checkpoint(
args, global_step, model, tokenizer, optimizer, scheduler)
srd_update_ann, tgd_update_ann = False, False
_, tgd_epoch_iter = build_dl_iter_from_file(args, tgd_file, file_process_fn)
step += 1
# get srd batch and inputs
try:
batch = next(train_dataloader_iter)
except StopIteration:
logger.info("Finished iterating current dataset, begin reiterate")
train_dataloader_iter = iter(train_dataloader)
batch = next(train_dataloader_iter)
batch = tuple(t.to(args.device) for t in batch)
batch_size = batch[0].shape[0]
inputs = build_input_from_batch(args, batch, mode='full', triplet=True)
# get tgd batch and inputs
tgd_batch, tgd_epoch_iter = get_next(
tgd_epoch_iter, args, tgd_file, file_process_fn, batch_size)
tgd_batch = tuple(t.to(args.device).long() for t in tgd_batch)
tgd_query_inputs = build_input_from_batch(args, tgd_batch, mode='query')
if step % 2 == 0:
tgd_doc_inputs = build_input_from_batch(args, tgd_batch, mode='pos_doc')
else:
tgd_doc_inputs = build_input_from_batch(args, tgd_batch, mode='neg_doc')
##### 1. forward of the encoder model #####
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
outputs = model(**inputs, output_dc_emb=True)
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
outputs = model(**inputs, output_dc_emb=True)
ranking_loss = outputs[0]
if step % 2 == 0:
srd_embs = [outputs[1][0], outputs[1][1]]
else:
srd_embs = [outputs[1][0], outputs[1][2]]
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
tgd_query_emb = get_module(model).query_emb(**tgd_query_inputs)
tgd_doc_emb = get_module(model).body_emb(**tgd_doc_inputs)
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
tgd_query_emb = get_module(model).query_emb(**tgd_query_inputs)
tgd_doc_emb = get_module(model).body_emb(**tgd_doc_inputs)
tgd_embs = [tgd_query_emb, tgd_doc_emb]
        # detached copies; x.detach().clone() avoids the torch.tensor(tensor) copy warning
        detached_srd_embs = [x.detach().clone() for x in srd_embs]
        detached_tgd_embs = [x.detach().clone() for x in tgd_embs]
if args.dc_rep_method == 'async':
if len(accumulated_srd_embs) == args.dc_rep_steps:
accumulated_srd_embs.pop(0)
accumulated_tgd_embs.pop(0)
accumulated_srd_embs.append(detached_srd_embs)
accumulated_tgd_embs.append(detached_tgd_embs)
for emb in srd_embs+tgd_embs:
optim_cumulator['embedding_norm'] += emb.norm(dim=1).mean() / 4
if args.n_gpu > 1:
ranking_loss = ranking_loss.mean()
if args.gradient_accumulation_steps > 1:
ranking_loss = ranking_loss / args.gradient_accumulation_steps
optim_cumulator['loss_ranking'] += ranking_loss.item()
# 2. feed detached embeddings to the dc_model and BP L_adv_D
for dc_rep_step in range(1+args.dc_rep_steps):
if args.dc_rep_method == 'repeat':
srd_dc_input_embs = detached_srd_embs
tgd_dc_input_embs = detached_tgd_embs
elif args.dc_rep_method == 'async':
which_step = min(dc_rep_step, len(accumulated_srd_embs)-1)
srd_dc_input_embs = accumulated_srd_embs[which_step]
tgd_dc_input_embs = accumulated_tgd_embs[which_step]
if dc_rep_step == 0:
batched_srd_dc_input_embs = srd_dc_input_embs
batched_tgd_dc_input_embs = tgd_dc_input_embs
elif dc_rep_step % args.dc_rep_step_per_batch != 0:
batched_srd_dc_input_embs[0].append(srd_dc_input_embs[0])
batched_srd_dc_input_embs[1].append(srd_dc_input_embs[1])
batched_tgd_dc_input_embs[0].append(tgd_dc_input_embs[0])
batched_tgd_dc_input_embs[1].append(tgd_dc_input_embs[1])
continue
else:
batched_srd_dc_input_embs[0].append(srd_dc_input_embs[0])
batched_srd_dc_input_embs[1].append(srd_dc_input_embs[1])
batched_tgd_dc_input_embs[0].append(tgd_dc_input_embs[0])
batched_tgd_dc_input_embs[1].append(tgd_dc_input_embs[1])
batched_srd_dc_input_embs[0] = torch.cat(batched_srd_dc_input_embs[0])
batched_srd_dc_input_embs[1] = torch.cat(batched_srd_dc_input_embs[1])
batched_tgd_dc_input_embs[0] = torch.cat(batched_tgd_dc_input_embs[0])
batched_tgd_dc_input_embs[1] = torch.cat(batched_tgd_dc_input_embs[1])
# 2.1 feed detached embeddings to the dc_model
L_adv_D = 0.0
label_size = batch_size * (1 if dc_rep_step==0 else args.dc_rep_step_per_batch)
srd_labels = torch.tensor([0] * label_size, device=args.device)
tgd_labels = torch.tensor([1] * label_size, device=args.device)
for i_emb, emb in enumerate(batched_srd_dc_input_embs):
labels = srd_labels
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
dc_srd_outputs = dc_model(emb, labels=labels)
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
dc_srd_outputs = dc_model(emb, labels=labels)
L_adv_D += dc_srd_outputs[1] * args.dc_rep_step_per_batch # scale up because of the average in cross_entropy
if dc_rep_step == 0:
suffix = 'Q' if i_emb==0 else 'P'
optim_cumulator[f'dc_total_{suffix}'] += dc_srd_outputs[2][0]
optim_cumulator[f'dc_correct_{suffix}'] += dc_srd_outputs[2][1]
optim_cumulator['dc_pre_softmax_logits_0'] += dc_srd_outputs[0][:, 0].mean() / 4
optim_cumulator['dc_pre_softmax_logits_1'] += dc_srd_outputs[0][:, 1].mean() / 4
probs = torch.softmax(dc_srd_outputs[0], dim=1)
optim_cumulator['dc_post_softmax_prob_0'] += probs[:, 0].mean() / 4
optim_cumulator['dc_post_softmax_prob_1'] += probs[:, 1].mean() / 4
for i_emb, emb in enumerate(batched_tgd_dc_input_embs):
labels = tgd_labels
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
dc_tgd_outputs = dc_model(emb, labels=labels)
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
dc_tgd_outputs = dc_model(emb, labels=labels)
L_adv_D += dc_tgd_outputs[1] * args.dc_rep_step_per_batch # scale up because of the average in cross_entropy
if dc_rep_step == 0:
suffix = 'Q' if i_emb==0 else 'P'
optim_cumulator[f'dc_total_{suffix}'] += dc_tgd_outputs[2][0]
optim_cumulator[f'dc_correct_{suffix}'] += dc_tgd_outputs[2][1]
optim_cumulator['dc_pre_softmax_logits_0'] += dc_tgd_outputs[0][:, 0].mean() / 4
optim_cumulator['dc_pre_softmax_logits_1'] += dc_tgd_outputs[0][:, 1].mean() / 4
probs = torch.softmax(dc_tgd_outputs[0], dim=1)
optim_cumulator['dc_post_softmax_prob_0'] += probs[:, 0].mean() / 4
optim_cumulator['dc_post_softmax_prob_1'] += probs[:, 1].mean() / 4
if dc_rep_step % args.dc_rep_step_per_batch == 0:
batched_srd_dc_input_embs = [[], []]
batched_tgd_dc_input_embs = [[], []]
if dc_rep_step == 0:
continue # this dc_rep_step is only for logging things for optim_cumulator
if args.n_gpu > 1:
L_adv_D = L_adv_D.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
L_adv_D = L_adv_D / args.gradient_accumulation_steps
optim_cumulator['loss_adv_D'] += L_adv_D.item() / args.dc_rep_steps
# 2.2 BP of L_adv_D; dc_optimizer update
if args.fp16:
with amp.scale_loss(L_adv_D, dc_optimizer) as scaled_loss:
scaled_loss.backward()
else:
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
L_adv_D.backward()
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
L_adv_D.backward()
if step % args.gradient_accumulation_steps == 0:
grad_norm_cumulator['domain_classifier'] += compute_total_grad_L2_norm(
dc_model.parameters()
) / args.dc_rep_steps
if not args.no_gn_clip:
if args.fp16:
torch.nn.utils.clip_grad_norm_(
amp.master_params(dc_optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(
dc_model.parameters(), args.max_grad_norm)
grad_norm_cumulator['domain_classifier-clipped'] += compute_total_grad_L2_norm(
dc_model.parameters()
) / args.dc_rep_steps
dc_optimizer.step()
dc_model.zero_grad()
if step % args.gradient_accumulation_steps == 0:
dc_scheduler.step() # this is outside of the dc_rep_step loop
# 3.1 copy the dc_model, feed (undetached) embeddings to it
get_module(static_dc_model).load_state_dict(get_module(dc_model).state_dict())
L_adv_M = 0.0
if args.dc_loss_choice == 'minimax':
srd_labels = torch.tensor([0] * batch_size, device=args.device)
tgd_labels = torch.tensor([1] * batch_size, device=args.device)
elif args.dc_loss_choice == 'gan':
tgd_labels = torch.tensor([0] * batch_size, device=args.device)
elif args.dc_loss_choice == 'confusion':
srd_labels = 'uniform'
tgd_labels = 'uniform'
else:
raise NotImplementedError()
if args.dc_loss_choice != 'gan':
for emb in srd_embs:
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
dc_srd_outputs = static_dc_model(emb, labels=srd_labels)
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
dc_srd_outputs = static_dc_model(emb, labels=srd_labels)
L_adv_M += dc_srd_outputs[1]
for emb in tgd_embs:
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
dc_tgd_outputs = static_dc_model(emb, labels=tgd_labels)
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
dc_tgd_outputs = static_dc_model(emb, labels=tgd_labels)
L_adv_M += dc_tgd_outputs[1]
if args.dc_loss_choice == 'minimax':
L_adv_M = -L_adv_M
L_adv_M *= dyn_lamb
if args.n_gpu > 1:
L_adv_M = L_adv_M.mean()
if args.gradient_accumulation_steps > 1:
L_adv_M = L_adv_M / args.gradient_accumulation_steps
optim_cumulator['loss_adv_M'] += L_adv_M.item()
# 3.2 BP of ranking loss and L_adv_M; optimizer update
loss = ranking_loss + L_adv_M
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
loss.backward()
else:
with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
loss.backward()
if step % args.gradient_accumulation_steps == 0:
for model_part, params in model_parts_params.items():
grad_norm_cumulator[model_part] += compute_total_grad_L2_norm(params)
if not args.no_gn_clip:
if args.fp16:
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.max_grad_norm)
for model_part, params in model_parts_params.items():
grad_norm_cumulator[model_part+'-clipped'] += compute_total_grad_L2_norm(params)
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
# end of the main part of training
if step % args.gradient_accumulation_steps == 0:
if args.lamb_reduce_to_half_steps > 0:
if is_first_worker():
tb_writer.add_scalar("lambda", dyn_lamb, global_step)
dyn_lamb = args.lamb * 2**(-global_step / args.lamb_reduce_to_half_steps)
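                # e.g. with args.lamb = 0.1 and lamb_reduce_to_half_steps = 1000 (values
                # illustrative): dyn_lamb = 0.05 at step 1000 and 0.025 at step 2000,
                # i.e. it halves every 1000 steps.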
if (args.logging_steps > 0 and global_step % args.logging_steps == 0):
logs = {}
logs["linear_layer_L2norm"] = get_module(dc_model).layers[0].weight.norm().item()
logs["linear_layer_mean"] = get_module(dc_model).layers[0].weight.mean().item()
logs["learning_rate"] = scheduler.get_last_lr()[0]
logs["learning_rate_dc"] = dc_optimizer.param_groups[0]['lr']
logs["dc_acc_Q"] = optim_cumulator['dc_correct_Q'] / (1e-10 + optim_cumulator['dc_total_Q'])
logs["dc_acc_P"] = optim_cumulator['dc_correct_P'] / (1e-10 + optim_cumulator['dc_total_P'])
for k in optim_monitors:
if k not in ['dc_total_Q', 'dc_correct_Q', 'dc_total_P', 'dc_correct_P']:
logs[k] = float(optim_cumulator[k] / args.logging_steps / args.gradient_accumulation_steps)
optim_cumulator = {k: 0.0 for k in optim_monitors} # reset
if is_first_worker():
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
logs.update({k: v/args.logging_steps for k, v in grad_norm_cumulator.items()})
logger.info(json.dumps({**logs, **{"step": global_step}}))
for key, value in grad_norm_cumulator.items():
tb_writer.add_scalar(
'grad_norm-'+key,
value / args.logging_steps,
global_step)
grad_norm_cumulator[key] = 0.0 # reset
if args.eval_steps > 0 and global_step % args.eval_steps == 0:
prev_dry_dc_state_dict = intrain_dev_eval(
args, global_step, model, tb_writer, prev_dry_dc_state_dict)
intrain_save_checkpoint(
args, global_step, model, tokenizer, optimizer, scheduler)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
tb_writer.close()
tgd_file.close()
return global_step
def get_arguments():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the cached passage and query files",
)
parser.add_argument(
"--tgd_raw_data_dir",
default=None,
type=str,
help="The input raw data dir for target domain.",
)
parser.add_argument(
"--tgd_data_name",
default=None,
type=str,
required=False,
help="The target domain dataset name.",
)
parser.add_argument(
"--intraindev_data_dir",
default=None,
type=str,
required=False,
help="The input data dir for in-train-dev set.",
)
parser.add_argument(
"--intraindev_data_name",
default=None,
type=str,
required=False,
help="The in-train-dev dataset name.",
)
parser.add_argument(
"--ann_dir",
default=None,
type=str,
required=True,
help="The ann training data dir. Should contain the output of ann data generation job",
)
parser.add_argument(
"--tgd_ann_dir",
default=None,
type=str,
required=False,
help="The ann training data dir for tgd. Should contain the output of ann data generation job",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " +
", ".join(
MSMarcoConfigDict.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " +
", ".join(ALL_MODELS),
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " +
", ".join(
processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--config_name",
default="",
type=str,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--saved_embedding_dir",
default="",
type=str,
help="The directory where intraindev embeddings are dumped",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence (document) length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_query_length",
default=64,
type=int,
help="The maximum total input query length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_query_length_tgd",
default=None,
type=int,
help="The maximum total input query length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Default is args.max_query_length. "
"This argument is used only when target domain is arguana, where max_query_len=512 is needed.",
)
parser.add_argument(
"--triplet",
default=False,
action="store_true",
help="Whether to run training.",
)
parser.add_argument(
"--do_lower_case",
action="store_true",
help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--log_dir",
default=None,
type=str,
help="Tensorboard log dir",
)
parser.add_argument(
"--optimizer",
default="lamb",
type=str,
help="Optimizer - lamb or adamW",
)
parser.add_argument(
"--dc_method",
default="classification",
type=str,
help="What to do for domain confusion. "
"classification: classify the source of a vector representation. "
"knn: a representation's k-nearest neighbors should have members from both domain "
"(its implementation is removed; check c1fae2c).")
parser.add_argument(
"--dc_loss_choice",
default="minimax",
type=str,
help="Adversarial loss choice (ADDA paper, Table 1, 4th column).")
parser.add_argument(
"--dc_layers",
default=1,
type=int,
help="How many layers to use for the domain classifier",
)
parser.add_argument(
"--per_gpu_train_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--dc_learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for dc_model.",
)
parser.add_argument(
"--weight_decay",
default=0.0,
type=float,
help="Weight decay if we apply some.",
)
parser.add_argument(
"--dropout_rate",
default=0.1,
type=float,
help="Dropout probability",
)
parser.add_argument(
"--adam_epsilon",
default=1e-8,
type=float,
help="Epsilon for Adam optimizer.",
)
parser.add_argument(
"--lamb",
default=0.1,
type=float,
help="Coefficient for domain classification loss.",
)
parser.add_argument(
"--lamb_reduce_to_half_steps",
default=0,
type=int,
help="Reduce dyn_lamb exponentially, and it will be reduced to a half after X steps.",
)
parser.add_argument(
"--dc_rep_steps",
default=1,
type=int,
help="Update dc_model over a single batch for X steps.",
)
parser.add_argument(
"--dc_rep_method",
default="repeat",
type=str,
help="Use what data for dc repetitive training. "
"repeat: use the same batch repetitively; "
"async: use embeddings recorded from previous batches."
)
parser.add_argument(
"--dc_rep_step_per_batch",
default=1,
type=int,
help="For dc_rep, how many steps of embeddings to put in one batch",
)
parser.add_argument(
"--dc_weightDecay",
default=0.0,
type=float,
help="Weight decay if we apply some for domain classifier.",
)
parser.add_argument(
"--no_gn_clip",
action="store_true",
help="Whether to disable grad norm clipping",
)
parser.add_argument(
"--max_grad_norm",
default=1.0,
type=float,
help="Max gradient norm.",
)
parser.add_argument(
"--max_steps",
default=1000000,
type=int,
help="If > 0: set total number of training steps to perform",
)
parser.add_argument(
"--warmup_steps",
default=0,
type=int,
help="Linear warmup over warmup_steps.",
)
parser.add_argument(
"--logging_steps",
type=int,
default=500,
help="Log every X updates steps.",
)
parser.add_argument(
"--eval_steps",
type=int,
default=500,
help="Evaluate the model every X updates steps.",
)
parser.add_argument(
"--no_cuda",
action="store_true",
help="Avoid using CUDA when available",
)
parser.add_argument(
"--seed",
type=int,
default=42,
help="random seed for initialization",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
# ----------------- ANN HyperParam ------------------
parser.add_argument(
"--load_optimizer_scheduler",
default=False,
action="store_true",
help="load scheduler from checkpoint or not",
)
parser.add_argument(
"--single_warmup",
default=False,
action="store_true",
help="use single or re-warmup",
)
# ----------------- End of Doc Ranking HyperParam ------------------
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
parser.add_argument(
"--server_ip",
type=str,
default="",
help="For distant debugging.",
)
parser.add_argument(
"--server_port",
type=str,
default="",
help="For distant debugging.",
)
args = parser.parse_args()
# sort intraindev datasets, so that tinymsmarco is the first and the target domain dataset is the second
args.intraindev_data_name = args.intraindev_data_name.split(',')
args.intraindev_data_dir = args.intraindev_data_dir.split(',')
assert args.intraindev_data_name[0] == 'tinymsmarco'
assert len(args.intraindev_data_name) >= 2
try:
tgd_position = args.intraindev_data_name.index(args.tgd_data_name)
args.intraindev_data_name[1], args.intraindev_data_name[tgd_position] = args.intraindev_data_name[tgd_position], args.intraindev_data_name[1]
args.intraindev_data_dir[1], args.intraindev_data_dir[tgd_position] = args.intraindev_data_dir[tgd_position], args.intraindev_data_dir[1]
args.mix_tgd = False
except ValueError:
args.mix_tgd = True
return args
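# Illustrative sketch (not part of the original script): a minimal invocation
# covering only the required flags defined above. The script name, model type,
# task name and paths are placeholders; substitute keys from MSMarcoConfigDict
# and processors, and note that flags defined earlier in this file (data
# directories, etc.) are omitted here.
#
#   python <this_script>.py \
#       --model_type <key from MSMarcoConfigDict> \
#       --model_name_or_path roberta-base \
#       --task_name <key from processors> \
#       --output_dir ./checkpoints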
def set_env(args):
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see
# https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(
address=(
args.server_ip,
args.server_port),
redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
def load_model(args):
# Prepare GLUE task
args.task_name = args.task_name.lower()
args.output_mode = "classification"
label_list = ["0", "1"]
num_labels = len(label_list)
# store args
if args.local_rank != -1:
args.world_size = torch.distributed.get_world_size()
args.rank = dist.get_rank()
else:
args.world_size = 1
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
# Make sure only the first process in distributed training will
# download model & vocab
torch.distributed.barrier()
args.model_type = args.model_type.lower()
configObj = MSMarcoConfigDict[args.model_type]
config = configObj.config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
config.output_hidden_states = True
change_dropout_rate(config, args.dropout_rate)
tokenizer = configObj.tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = configObj.model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
# Make sure only the first process in distributed training will
# download model & vocab
torch.distributed.barrier()
model.to(args.device)
def file_process_fn(line, i):
return configObj.process_fn(line, i, tokenizer, args)
return tokenizer, model, file_process_fn
def change_dropout_rate(config, val):
config.attention_probs_dropout_prob = val
config.hidden_dropout_prob = val
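# Illustrative sketch (not part of the original script): change_dropout_rate()
# mutates the two standard HF dropout fields in place before the model is built
# from the config. BertConfig is used here purely as an example assumption; any
# config exposing these attributes works. The helper name is hypothetical.
def _sketch_change_dropout_rate():
    from transformers import BertConfig
    cfg = BertConfig()
    change_dropout_rate(cfg, 0.2)
    return cfg.attention_probs_dropout_prob, cfg.hidden_dropout_prob  # (0.2, 0.2)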
def save_checkpoint(args, model, tokenizer):
# Saving best-practices: if you use defaults names for the model, you can
# reload it using from_pretrained()
if is_first_worker():
# Create output directory if needed
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained
# model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
if args.local_rank != -1:
dist.barrier()
def main(profiling=False):
args = get_arguments()
if profiling:
args.max_steps = 200
set_env(args)
tokenizer, model, file_process_fn = load_model(args)
dc_model = DomainClassifier(args)
dc_model.to(args.device)
query_collection_path = os.path.join(args.data_dir, "train-query")
query_cache = EmbeddingCache(query_collection_path)
passage_collection_path = os.path.join(args.data_dir, "passages")
passage_cache = EmbeddingCache(passage_collection_path)
tgd_file_name = os.path.join(args.tgd_raw_data_dir, "triples.simple.tsv")
with query_cache, passage_cache:
global_step = train(
args, model, dc_model, tokenizer,
(query_cache, passage_cache),
tgd_file_name, file_process_fn
)
logger.info(" global_step = %s", global_step)
save_checkpoint(args, model, tokenizer)
if __name__ == "__main__":
profiling = False
if profiling:
import cProfile
from pstats import SortKey
cProfile.run("main(profiling=True)", sort=SortKey.CUMULATIVE)
else:
main()
| 46,511 | 35.027885 | 149 | py |
modir | modir-master/utils/eval_mrr.py | import sys
sys.path += ["../"]
from utils.msmarco_eval import quality_checks_qids, compute_metrics, load_reference
import torch.distributed as dist
import gzip
import faiss
import numpy as np
from data.process_fn import dual_process_fn
from tqdm import tqdm
import torch
import os
from utils.util import concat_key, is_first_worker, all_gather, StreamingDataset
from torch.utils.data import DataLoader
def embedding_inference(args, path, model, fn, bz, num_workers=2, is_query=True):
f = open(path, encoding="utf-8")
model = model.module if hasattr(model, "module") else model
sds = StreamingDataset(f, fn)
loader = DataLoader(sds, batch_size=bz, num_workers=1)
emb_list, id_list = [], []
model.eval()
for i, batch in tqdm(enumerate(loader), desc="Eval", disable=args.local_rank not in [-1, 0]):
        if os.environ.get('DEBUG') == 'True' and i == 20: break  # debug only; .get() avoids a KeyError when DEBUG is unset
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0].long(
), "attention_mask": batch[1].long()}
idx = batch[3].long()
if is_query:
embs = model.query_emb(**inputs)
else:
embs = model.body_emb(**inputs)
if len(embs.shape) == 3:
B, C, E = embs.shape
# [b1c1, b1c2, b1c3, b1c4, b2c1 ....]
embs = embs.view(B*C, -1)
idx = idx.repeat_interleave(C)
assert embs.shape[0] == idx.shape[0]
emb_list.append(embs.detach().cpu().numpy())
id_list.append(idx.detach().cpu().numpy())
f.close()
emb_arr = np.concatenate(emb_list, axis=0)
id_arr = np.concatenate(id_list, axis=0)
return emb_arr, id_arr
def parse_top_dev(input_path, qid_col, pid_col):
ret = {}
with open(input_path, encoding="utf-8") as f:
for line in f:
cells = line.strip().split("\t")
qid = int(cells[qid_col])
pid = int(cells[pid_col])
if qid not in ret:
ret[qid] = []
ret[qid].append(pid)
return ret
def search_knn(xq, xb, k, distance_type=faiss.METRIC_L2):
""" wrapper around the faiss knn functions without index """
nq, d = xq.shape
nb, d2 = xb.shape
assert d == d2
I = np.empty((nq, k), dtype='int64')
D = np.empty((nq, k), dtype='float32')
if distance_type == faiss.METRIC_L2:
heaps = faiss.float_maxheap_array_t()
heaps.k = k
heaps.nh = nq
heaps.val = faiss.swig_ptr(D)
heaps.ids = faiss.swig_ptr(I)
faiss.knn_L2sqr(
faiss.swig_ptr(xq), faiss.swig_ptr(xb),
d, nq, nb, heaps
)
elif distance_type == faiss.METRIC_INNER_PRODUCT:
heaps = faiss.float_minheap_array_t()
heaps.k = k
heaps.nh = nq
heaps.val = faiss.swig_ptr(D)
heaps.ids = faiss.swig_ptr(I)
faiss.knn_inner_product(
faiss.swig_ptr(xq), faiss.swig_ptr(xb),
d, nq, nb, heaps
)
return D, I
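# Illustrative sketch (not part of the original module): search_knn() above
# wraps faiss's raw knn kernels; a temporary flat index gives the same result
# and makes the expected shapes explicit: D and I are both (num_queries, k).
# The helper name is hypothetical.
def _sketch_search_knn_with_index(xq, xb, k):
    index = faiss.IndexFlatIP(xb.shape[1])  # inner-product metric
    index.add(xb.astype('float32'))
    D, I = index.search(xq.astype('float32'), k)
    return D, I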
def get_topk_restricted(q_emb, psg_emb_arr, pid_dict, psg_ids, pid_subset, top_k):
subset_ix = np.array([pid_dict[x]
for x in pid_subset if x != -1 and x in pid_dict])
if len(subset_ix) == 0:
_D = np.ones((top_k,))*-128
_I = (np.ones((top_k,))*-1).astype(int)
return _D, _I
else:
sub_emb = psg_emb_arr[subset_ix]
_D, _I = search_knn(q_emb, sub_emb, top_k,
distance_type=faiss.METRIC_INNER_PRODUCT)
return _D.squeeze(), psg_ids[subset_ix[_I]].squeeze() # (top_k,)
def passage_dist_eval(args, model, tokenizer, use_valid=False, vld_path=None):
if not use_valid:
base_path = args.data_dir
passage_path = os.path.join(base_path, "collection.tsv")
queries_path = os.path.join(base_path, "queries.dev.small.tsv")
top1000_path = os.path.join(base_path, "top1000.dev.tsv")
mrr_ref_path = os.path.join(base_path, "qrels.dev.small.tsv")
else:
assert vld_path is not None
print('Use Valid Set', vld_path)
base_path = vld_path
passage_path = os.path.join(base_path, 'collection.tsv')
queries_path = os.path.join(base_path, 'queries.tsv')
mrr_ref_path = os.path.join(base_path, "qrels.tsv")
def fn(line, i):
return dual_process_fn(line, i, tokenizer, args)
if not use_valid:
top1k_qid_pid = parse_top_dev(top1000_path, qid_col=0, pid_col=1)
else:
top1k_qid_pid = None
ref_dict = load_reference(mrr_ref_path)
print('Start evaluating')
reranking_mrr, full_ranking_mrr = combined_dist_eval(
args, model, queries_path, passage_path, fn, fn, top1k_qid_pid, ref_dict)
return reranking_mrr, full_ranking_mrr
def combined_dist_eval(args, model, queries_path, passage_path,
query_fn, psg_fn, topk_dev_qid_pid, ref_dict):
# get query/psg embeddings here
eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
query_embs, query_ids = embedding_inference(
args, queries_path, model, query_fn, eval_batch_size, 1, True)
query_pkl = {"emb": query_embs, "id": query_ids}
all_query_list = all_gather(query_pkl)
query_embs = concat_key(all_query_list, "emb")
query_ids = concat_key(all_query_list, "id")
print(query_embs.shape, query_ids.shape)
psg_embs, psg_ids = embedding_inference(
args, passage_path, model, psg_fn, eval_batch_size, 2, False)
print(psg_embs.shape)
top_k = 100
D, I = search_knn(query_embs, psg_embs, top_k,
distance_type=faiss.METRIC_INNER_PRODUCT)
I = psg_ids[I]
# compute reranking and full ranking mrr here
if topk_dev_qid_pid is not None:
# topk_dev_qid_pid is used for computing reranking mrr
pid_dict = dict([(p, i) for i, p in enumerate(psg_ids)])
arr_data = []
d_data = []
for i, qid in enumerate(query_ids):
q_emb = query_embs[i:i+1]
pid_subset = topk_dev_qid_pid[qid]
ds, top_pids = get_topk_restricted(
q_emb, psg_embs, pid_dict, psg_ids, pid_subset, 10)
arr_data.append(top_pids)
d_data.append(ds)
_D = np.array(d_data)
_I = np.array(arr_data)
# reranking mrr
reranking_mrr = compute_mrr(_D, _I, query_ids, ref_dict)
else:
reranking_mrr = 0.0
D2 = D[:, :100]
I2 = I[:, :100]
# full mrr
full_ranking_mrr = compute_mrr(D2, I2, query_ids, ref_dict)
del psg_embs
torch.cuda.empty_cache()
if args.local_rank != -1:
dist.barrier()
return reranking_mrr, full_ranking_mrr
def compute_mrr(D, I, qids, ref_dict):
knn_pkl = {"D": D, "I": I}
all_knn_list = all_gather(knn_pkl)
mrr = 0.0
if is_first_worker():
D_merged = concat_key(all_knn_list, "D", axis=1)
I_merged = concat_key(all_knn_list, "I", axis=1)
print(D_merged.shape, I_merged.shape)
# we pad with negative pids and distance -128 - if they make it to the top we have a problem
idx = np.argsort(D_merged, axis=1)[:, ::-1][:, :10]
sorted_I = np.take_along_axis(I_merged, idx, axis=1)
candidate_dict = {}
for i, qid in enumerate(qids):
seen_pids = set()
if qid not in candidate_dict:
candidate_dict[qid] = [0]*1000
j = 0
for pid in sorted_I[i]:
if pid >= 0 and pid not in seen_pids:
candidate_dict[qid][j] = pid
j += 1
seen_pids.add(pid)
allowed, message = quality_checks_qids(ref_dict, candidate_dict)
if message != '':
print(message)
mrr_metrics = compute_metrics(ref_dict, candidate_dict)
mrr = mrr_metrics["MRR @10"]
print(mrr)
return mrr
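# Illustrative sketch (not part of the original module): a tiny single-process
# worked example of compute_mrr(). Outside distributed mode, all_gather()
# degrades to returning [data] and is_first_worker() is True, so this runs
# standalone. The helper name is hypothetical.
def _sketch_compute_mrr():
    D = np.array([[0.9, 0.7, 0.1]])   # similarity scores for one query
    I = np.array([[42, 7, 13]])       # passage ids aligned with D
    qids = np.array([3])
    ref_dict = {3: [7]}               # pid 7 is the only relevant passage
    return compute_mrr(D, I, qids, ref_dict)  # hit at rank 2 -> MRR@10 = 0.5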
| 7,984 | 34.807175 | 100 | py |
modir | modir-master/utils/dpr_utils.py | import collections
import sys
sys.path += ['../']
import glob
import logging
import os
from typing import List, Tuple, Dict
import faiss
import pickle
import numpy as np
import unicodedata
import torch
import torch.distributed as dist
from torch import nn
from torch.serialization import default_restore_location
import regex
from transformers import AdamW
from utils.lamb import Lamb
logger = logging.getLogger()
CheckpointState = collections.namedtuple("CheckpointState",
['model_dict', 'optimizer_dict', 'scheduler_dict', 'offset', 'epoch',
'encoder_params'])
def get_encoder_checkpoint_params_names():
return ['do_lower_case', 'pretrained_model_cfg', 'encoder_model_type',
'pretrained_file',
'projection_dim', 'sequence_length']
def get_encoder_params_state(args):
"""
    Selects the param values to be saved in a checkpoint, so that a trained model file can be used for downstream
    tasks without the need to specify these parameters again
:return: Dict of params to memorize in a checkpoint
"""
params_to_save = get_encoder_checkpoint_params_names()
r = {}
for param in params_to_save:
r[param] = getattr(args, param)
return r
def set_encoder_params_from_state(state, args):
if not state:
return
params_to_save = get_encoder_checkpoint_params_names()
override_params = [(param, state[param]) for param in params_to_save if param in state and state[param]]
for param, value in override_params:
if hasattr(args, param):
logger.warning('Overriding args parameter value from checkpoint state. Param = %s, value = %s', param,
value)
setattr(args, param, value)
return args
def get_model_obj(model: nn.Module):
return model.module if hasattr(model, 'module') else model
def get_model_file(args, file_prefix) -> str:
out_cp_files = glob.glob(os.path.join(args.output_dir, file_prefix + '*')) if args.output_dir else []
logger.info('Checkpoint files %s', out_cp_files)
model_file = None
if args.model_file and os.path.exists(args.model_file):
model_file = args.model_file
elif len(out_cp_files) > 0:
model_file = max(out_cp_files, key=os.path.getctime)
return model_file
def load_states_from_checkpoint(model_file: str) -> CheckpointState:
logger.info('Reading saved model from %s', model_file)
state_dict = torch.load(model_file, map_location=lambda s, l: default_restore_location(s, 'cpu'))
logger.info('model_state_dict keys %s', state_dict.keys())
return CheckpointState(**state_dict)
def get_optimizer(args, model: nn.Module, weight_decay: float = 0.0, ) -> torch.optim.Optimizer:
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.optimizer == "adamW":
return AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
elif args.optimizer == "lamb":
return Lamb(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
else:
raise Exception("optimizer {0} not recognized! Can only be lamb or adamW".format(args.optimizer))
def all_gather_list(data, group=None, max_size=16384):
"""Gathers arbitrary data from all nodes into a list.
Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python
data. Note that *data* must be picklable.
Args:
data (Any): data from the local worker to be gathered on other workers
group (optional): group of the collective
"""
SIZE_STORAGE_BYTES = 4 # int32 to encode the payload size
enc = pickle.dumps(data)
enc_size = len(enc)
if enc_size + SIZE_STORAGE_BYTES > max_size:
raise ValueError(
'encoded data exceeds max_size, this can be fixed by increasing buffer size: {}'.format(enc_size))
rank = dist.get_rank()
world_size = dist.get_world_size()
buffer_size = max_size * world_size
if not hasattr(all_gather_list, '_buffer') or \
all_gather_list._buffer.numel() < buffer_size:
all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
buffer = all_gather_list._buffer
buffer.zero_()
cpu_buffer = all_gather_list._cpu_buffer
assert enc_size < 256 ** SIZE_STORAGE_BYTES, 'Encoded object size should be less than {} bytes'.format(
256 ** SIZE_STORAGE_BYTES)
size_bytes = enc_size.to_bytes(SIZE_STORAGE_BYTES, byteorder='big')
cpu_buffer[0:SIZE_STORAGE_BYTES] = torch.ByteTensor(list(size_bytes))
cpu_buffer[SIZE_STORAGE_BYTES: enc_size + SIZE_STORAGE_BYTES] = torch.ByteTensor(list(enc))
start = rank * max_size
size = enc_size + SIZE_STORAGE_BYTES
buffer[start: start + size].copy_(cpu_buffer[:size])
if group is None:
group = dist.group.WORLD
dist.all_reduce(buffer, group=group)
try:
result = []
for i in range(world_size):
out_buffer = buffer[i * max_size: (i + 1) * max_size]
size = int.from_bytes(out_buffer[0:SIZE_STORAGE_BYTES], byteorder='big')
if size > 0:
result.append(pickle.loads(bytes(out_buffer[SIZE_STORAGE_BYTES: size + SIZE_STORAGE_BYTES].tolist())))
return result
except pickle.UnpicklingError:
raise Exception(
'Unable to unpickle data from other workers. all_gather_list requires all '
'workers to enter the function together, so this error usually indicates '
'that the workers have fallen out of sync somehow. Workers can fall out of '
'sync if one of them runs out of memory, or if there are other conditions '
'in your training script that can cause one worker to finish an epoch '
'while other workers are still iterating over their portions of the data.'
)
class DenseHNSWFlatIndexer(object):
"""
    Efficient index for retrieval. Note: default settings are for high accuracy but also high RAM usage
"""
def __init__(self, vector_sz: int, buffer_size: int = 50000, store_n: int = 512
, ef_search: int = 128, ef_construction: int = 200):
self.buffer_size = buffer_size
self.index_id_to_db_id = []
self.index = None
# IndexHNSWFlat supports L2 similarity only
        # so we have to apply DOT -> L2 similarity space conversion with the help of an extra dimension
index = faiss.IndexHNSWFlat(vector_sz + 1, store_n)
index.hnsw.efSearch = ef_search
index.hnsw.efConstruction = ef_construction
self.index = index
self.phi = 0
def index_data(self, data: List[Tuple[object, np.array]]):
n = len(data)
# max norm is required before putting all vectors in the index to convert inner product similarity to L2
if self.phi > 0:
            raise RuntimeError('DPR HNSWF index needs to index all data at once, '
                               'results will be unpredictable otherwise.')
phi = 0
for i, item in enumerate(data):
id, doc_vector = item
norms = (doc_vector ** 2).sum()
phi = max(phi, norms)
        logger.info('HNSWF DotProduct -> L2 space phi={}'.format(phi))
        self.phi = phi  # remember phi so a second index_data() call trips the all-at-once guard above
# indexing in batches is beneficial for many faiss index types
for i in range(0, n, self.buffer_size):
db_ids = [t[0] for t in data[i:i + self.buffer_size]]
vectors = [np.reshape(t[1], (1, -1)) for t in data[i:i + self.buffer_size]]
norms = [(doc_vector ** 2).sum() for doc_vector in vectors]
aux_dims = [np.sqrt(phi - norm) for norm in norms]
hnsw_vectors = [np.hstack((doc_vector, aux_dims[i].reshape(-1, 1))) for i, doc_vector in
enumerate(vectors)]
hnsw_vectors = np.concatenate(hnsw_vectors, axis=0)
self._update_id_mapping(db_ids)
self.index.add(hnsw_vectors)
logger.info('data indexed %d', len(self.index_id_to_db_id))
indexed_cnt = len(self.index_id_to_db_id)
logger.info('Total data indexed %d', indexed_cnt)
def search_knn(self, query_vectors: np.array, top_docs: int) -> List[Tuple[List[object], List[float]]]:
aux_dim = np.zeros(len(query_vectors), dtype='float32')
        query_hnsw_vectors = np.hstack((query_vectors, aux_dim.reshape(-1, 1)))
        logger.info('query_hnsw_vectors %s', query_hnsw_vectors.shape)
        scores, indexes = self.index.search(query_hnsw_vectors, top_docs)
# convert to external ids
db_ids = [[self.index_id_to_db_id[i] for i in query_top_idxs] for query_top_idxs in indexes]
result = [(db_ids[i], scores[i]) for i in range(len(db_ids))]
return result
def _update_id_mapping(self, db_ids: List):
self.index_id_to_db_id.extend(db_ids)
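# Illustrative sketch (not part of the original module): index a handful of
# random vectors and run a search. Note the auxiliary dimension the indexer
# appends to map inner-product similarity onto faiss's L2-only HNSW index.
# The helper name is hypothetical.
def _sketch_hnsw_indexer():
    vecs = np.random.rand(100, 8).astype('float32')
    indexer = DenseHNSWFlatIndexer(vector_sz=8, buffer_size=50)
    indexer.index_data([('doc%d' % i, vecs[i]) for i in range(100)])
    return indexer.search_knn(vecs[:2], top_docs=5)  # list of (db_ids, scores)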
def check_answer(passages, answers, doc_ids, tokenizer):
"""Search through all the top docs to see if they have any of the answers."""
hits = []
for i, doc_id in enumerate(doc_ids):
text = passages[doc_id][0]
hits.append(has_answer(answers, text, tokenizer))
return hits
def has_answer(answers, text, tokenizer):
"""Check if a document contains an answer string.
If `match_type` is string, token matching is done between the text and answer.
If `match_type` is regex, we search the whole text with the regex.
"""
if text is None:
logger.warning("no doc in db")
return False
text = _normalize(text)
# Answer is a list of possible strings
text = tokenizer.tokenize(text).words(uncased=True)
for single_answer in answers:
single_answer = _normalize(single_answer)
single_answer = tokenizer.tokenize(single_answer)
single_answer = single_answer.words(uncased=True)
for i in range(0, len(text) - len(single_answer) + 1):
if single_answer == text[i: i + len(single_answer)]:
return True
return False
class SimpleTokenizer:
ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
NON_WS = r'[^\p{Z}\p{C}]'
def __init__(self, **kwargs):
"""
Args:
annotators: None or empty set (only tokenizes).
"""
self._regexp = regex.compile(
'(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
)
if len(kwargs.get('annotators', {})) > 0:
logger.warning('%s only tokenizes! Skipping annotators: %s' %
(type(self).__name__, kwargs.get('annotators')))
self.annotators = set()
def tokenize(self, text):
data = []
matches = [m for m in self._regexp.finditer(text)]
for i in range(len(matches)):
# Get text
token = matches[i].group()
# Get whitespace
span = matches[i].span()
start_ws = span[0]
if i + 1 < len(matches):
end_ws = matches[i + 1].span()[0]
else:
end_ws = span[1]
# Format data
data.append((
token,
text[start_ws: end_ws],
span,
))
return Tokens(data, self.annotators)
def _normalize(text):
return unicodedata.normalize('NFD', text)
class Tokens(object):
"""A class to represent a list of tokenized text."""
TEXT = 0
TEXT_WS = 1
SPAN = 2
POS = 3
LEMMA = 4
NER = 5
def __init__(self, data, annotators, opts=None):
self.data = data
self.annotators = annotators
self.opts = opts or {}
def __len__(self):
"""The number of tokens."""
return len(self.data)
def words(self, uncased=False):
"""Returns a list of the text of each token
Args:
uncased: lower cases text
"""
if uncased:
return [t[self.TEXT].lower() for t in self.data]
else:
return [t[self.TEXT] for t in self.data]
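# Illustrative sketch (not part of the original module): SimpleTokenizer keeps
# punctuation as separate tokens, which is what has_answer() relies on for
# exact token matching. The helper name is hypothetical.
def _sketch_simple_tokenizer():
    tokens = SimpleTokenizer().tokenize("Dense retrieval, e.g. DPR!")
    return tokens.words(uncased=True)  # lower-cased word/punctuation tokens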
| 12,483 | 35.934911 | 118 | py |
modir | modir-master/utils/modir_utils.py | import os
import sys
import csv
import numpy as np
import faiss
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
try:
from apex import amp
except ImportError:
print("apex not imported")
from utils.util import (
is_first_worker,
StreamingDataset,
EmbeddingCache,
)
from model.domain_classifier import DomainClassifier, dry_dc_evaluation
from drivers.run_ann_data_gen import StreamInferenceDoc, EvalDevQuery
from data.msmarco_data import GetProcessingFn
import logging
logger = logging.getLogger(__name__)
def compute_total_grad_L2_norm(param_list):
total_norm = 0.0
for p in param_list:
if p.grad is not None:
total_norm += torch.norm(p.grad) ** 2
total_norm = total_norm ** 0.5
return total_norm.item()
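# Illustrative sketch (not part of the original module): the returned value is
# the L2 norm over the concatenation of all parameter gradients.
# The helper name is hypothetical.
def _sketch_total_grad_norm():
    w = torch.ones(3, requires_grad=True)
    (w * 2).sum().backward()                # grad = [2, 2, 2]
    return compute_total_grad_L2_norm([w])  # sqrt(2^2 * 3) ~= 3.46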
def intrain_dev_eval(args, global_step, model, tb_writer, prev_dry_dc_state_dict,
all_datasets=False):
model.eval()
query_embs = []
passage_embs = []
intraindev_data_name = args.intraindev_data_name[:2]
with amp.disable_casts(): # back to fp32
for i_dev, data_name in enumerate(intraindev_data_name):
data_path = args.intraindev_data_dir[i_dev]
dev_evaluation_results = dev_evaluation(
args, data_path, model, return_embs=True)
if is_first_worker():
# query/passage embs obtained by the first worker
# are actually generated by all workers
# see the implementation of StreamInferenceDoc()
dev_result_dict, (query_emb, passage_emb, query_emb2id, passage_emb2id) = dev_evaluation_results
if i_dev <= 1:
query_embs.append(query_emb)
passage_embs.append(passage_emb)
np.save(
os.path.join(args.saved_embedding_dir, f"{data_name}_query-step{global_step}.npy"),
query_emb
)
np.save(
os.path.join(args.saved_embedding_dir, f"{data_name}_passage-step{global_step}.npy"),
passage_emb
)
np.save(
os.path.join(args.saved_embedding_dir, f"{data_name}_query2id-step{global_step}.npy"),
query_emb2id
)
np.save(
os.path.join(args.saved_embedding_dir, f"{data_name}_passage2id-step{global_step}.npy"),
passage_emb2id
)
if i_dev == 1:
dry_dc_model = DomainClassifier(args)
dry_dc_model.to(args.device)
dry_dc_acc, prev_dry_dc_acc, prev_dry_dc_state_dict = dry_dc_evaluation(
args, dry_dc_model, query_embs, passage_embs, prev_dry_dc_state_dict)
tb_writer.add_scalar("dc_acc_dry_Q", float(dry_dc_acc[0]), global_step)
tb_writer.add_scalar("dc_acc_dry_P", float(dry_dc_acc[1]), global_step)
if prev_dry_dc_acc[0] is not None:
tb_writer.add_scalar("dc_acc_prev_dry_Q", float(prev_dry_dc_acc[0]), global_step)
tb_writer.add_scalar("dc_acc_prev_dry_P", float(prev_dry_dc_acc[1]), global_step)
del dry_dc_model
torch.cuda.empty_cache()
print(data_name, dev_result_dict)
for k, v in dev_result_dict.items():
tb_writer.add_scalar(f"{data_name}-{k}", v, global_step)
if args.local_rank != -1:
dist.barrier()
model.train()
return prev_dry_dc_state_dict
def intrain_save_checkpoint(args, global_step,
model, tokenizer, optimizer, scheduler):
if is_first_worker():
        # identical to the one from original_drivers/run_ann
output_dir = os.path.join(
args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(
optimizer.state_dict(),
os.path.join(
output_dir,
"optimizer.pt"))
torch.save(
scheduler.state_dict(),
os.path.join(
output_dir,
"scheduler.pt"))
logger.info(
"Saving optimizer and scheduler states to %s",
output_dir)
if args.local_rank != -1:
dist.barrier()
def build_input_from_batch(args, batch, mode='full', triplet=False):
if triplet: # from ann
if mode == 'query':
inputs = {
"input_ids": batch[0].long(),
"attention_mask": batch[1].long()
}
elif mode == 'pos_doc':
inputs = {
"input_ids": batch[3].long(),
"attention_mask": batch[4].long()
}
elif mode == 'neg_doc':
inputs = {
"input_ids": batch[6].long(),
"attention_mask": batch[7].long()
}
else:
inputs = {
"query_ids": batch[0].long(),
"attention_mask_q": batch[1].long(),
"input_ids_a": batch[3].long(),
"attention_mask_a": batch[4].long(),
"input_ids_b": batch[6].long(),
"attention_mask_b": batch[7].long()
}
else: # from raw data
if mode == 'query':
inputs = {
"input_ids": batch[0].long(),
"attention_mask": batch[1].long()
}
elif mode == 'pos_doc':
inputs = {
"input_ids": batch[2].long(),
"attention_mask": batch[3].long()
}
elif mode == 'neg_doc':
inputs = {
"input_ids": batch[4].long(),
"attention_mask": batch[5].long()
}
elif mode == 'full':
inputs = {
"query_ids": batch[0].long(),
"attention_mask_q": batch[1].long(),
"input_ids_a": batch[2].long(),
"attention_mask_a": batch[3].long(),
"input_ids_b": batch[4].long(),
"attention_mask_b": batch[5].long()
}
return inputs
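# Illustrative sketch (not part of the original module): the two layouts only
# differ in which batch slots hold the positive/negative document ids (2/4 for
# raw-data batches vs. 3/6 for ANN triplet batches). The helper name is
# hypothetical; args is unused for these modes, so None is passed.
def _sketch_build_input_from_batch():
    batch = [torch.zeros(2, 5)] * 8  # stand-in tensors for every slot
    inputs = build_input_from_batch(None, batch, mode='query', triplet=True)
    return sorted(inputs)            # ['attention_mask', 'input_ids']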
def get_module(model):
return model.module if hasattr(model, "module") else model
def build_dl_iter_from_file(args, file_obj, process_fn):
file_obj.seek(0)
sds = StreamingDataset(file_obj, process_fn)
dataloader = DataLoader(sds, batch_size=args.per_gpu_train_batch_size, num_workers=0)
iterator = iter(dataloader)
return dataloader, iterator
def get_next(iterator, args, file_obj, process_fn, batch_size):
try:
batch = next(iterator)
assert batch_size == batch[0].shape[0]
except (AssertionError, StopIteration):
# print('Build new iterator')
_, iterator = build_dl_iter_from_file(args, file_obj, process_fn)
batch = next(iterator)
return batch, iterator
def dev_evaluation(args, data_path, model,
return_embs=False):
logger.info("Loading dev query_2_pos_docid")
dev_query_positive_id = {}
query_positive_id_path = os.path.join(data_path, "dev-qrel.tsv")
with open(query_positive_id_path, 'r', encoding='utf8') as f:
tsvreader = csv.reader(f, delimiter="\t")
for [topicid, docid, rel] in tsvreader:
topicid = int(topicid)
docid = int(docid)
if topicid not in dev_query_positive_id:
dev_query_positive_id[topicid] = {}
dev_query_positive_id[topicid][docid] = max(0, int(rel))
old_max_seq_length = args.max_seq_length
args.max_seq_length = 512 # otherwise it crashes
args.rank = args.local_rank
old_max_query_length = args.max_query_length
if 'arguana' in data_path:
args.max_query_length = 512
dev_tmp_ann_data_dir = "../dev_tmp_ann_data"
os.makedirs(dev_tmp_ann_data_dir, exist_ok=True)
logger.info("***** inference of dev query *****")
dev_query_collection_path = os.path.join(data_path, "dev-query")
dev_query_cache = EmbeddingCache(dev_query_collection_path)
with dev_query_cache as emb:
dev_query_embedding, dev_query_embedding2id = StreamInferenceDoc(
args,
model,
GetProcessingFn(args, query=True),
"dev_query_0_",
emb,
output_path=dev_tmp_ann_data_dir,
is_query_inference=True)
logger.info("***** inference of dev passages *****")
dev_passage_collection_path = os.path.join(data_path, "dev-passages")
dev_passage_cache = EmbeddingCache(dev_passage_collection_path)
with dev_passage_cache as emb:
dev_passage_embedding, dev_passage_embedding2id = StreamInferenceDoc(
args,
model,
GetProcessingFn(args, query=False),
"dev_passage_0_",
emb,
output_path=dev_tmp_ann_data_dir,
is_query_inference=False)
args.max_seq_length = old_max_seq_length
args.max_query_length = old_max_query_length
torch.cuda.empty_cache()
if is_first_worker():
# ANN search for dev passages and dev queries
dev_dim = dev_passage_embedding.shape[1]
print('dev passage embedding shape: ' + str(dev_passage_embedding.shape))
faiss.omp_set_num_threads(16)
dev_cpu_index = faiss.IndexFlatIP(dev_dim)
dev_cpu_index.add(dev_passage_embedding)
logger.info("***** Done Dev ANN Index *****")
_, dev_I = dev_cpu_index.search(dev_query_embedding, 100) # I: [number of queries, topk]
result_dict, num_queries_dev = EvalDevQuery(
args, dev_query_embedding2id, dev_passage_embedding2id,
dev_query_positive_id, dev_I)
if return_embs:
return (result_dict,
(dev_query_embedding, dev_passage_embedding, dev_query_embedding2id, dev_passage_embedding2id))
else:
return result_dict
| 10,586 | 36.676157 | 115 | py |
modir | modir-master/utils/util.py | import sys
sys.path += ['../']
import pandas as pd
from sklearn.metrics import roc_curve, auc
import gzip
import copy
import torch
from torch import nn
import torch.distributed as dist
from tqdm import tqdm, trange
import os
from os import listdir
from os.path import isfile, join
import json
import logging
import random
import pytrec_eval
import pickle
import numpy as np
import torch
torch.multiprocessing.set_sharing_strategy('file_system')
from multiprocessing import Process
from torch.utils.data import DataLoader, Dataset, TensorDataset, IterableDataset
import re
from model.models import MSMarcoConfigDict, ALL_MODELS
from typing import List, Set, Dict, Tuple, Callable, Iterable, Any
logger = logging.getLogger(__name__)
class InputFeaturesPair(object):
"""
A single set of features of data.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
token_type_ids: Segment token indices to indicate first and second portions of the inputs.
label: Label corresponding to the input
"""
def __init__(
self,
input_ids_a,
attention_mask_a=None,
token_type_ids_a=None,
input_ids_b=None,
attention_mask_b=None,
token_type_ids_b=None,
label=None):
self.input_ids_a = input_ids_a
self.attention_mask_a = attention_mask_a
self.token_type_ids_a = token_type_ids_a
self.input_ids_b = input_ids_b
self.attention_mask_b = attention_mask_b
self.token_type_ids_b = token_type_ids_b
self.label = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def getattr_recursive(obj, name):
for layer in name.split("."):
if hasattr(obj, layer):
obj = getattr(obj, layer)
else:
return None
return obj
def barrier_array_merge(
args,
data_array,
merge_axis=0,
prefix="",
output_path=None,
load_cache=False,
only_load_in_master=False):
# data array: [B, any dimension]
# merge alone one axis
assert output_path is not None
if args.local_rank == -1:
return data_array
if not load_cache:
rank = args.rank
if is_first_worker():
if not os.path.exists(output_path):
os.makedirs(output_path)
dist.barrier() # directory created
pickle_path = os.path.join(
output_path,
"{1}_data_obj_{0}.pb".format(
str(rank),
prefix))
with open(pickle_path, 'wb') as handle:
pickle.dump(data_array, handle, protocol=4)
# make sure all processes wrote their data before first process
# collects it
dist.barrier()
data_array = None
data_list = []
# return empty data
if only_load_in_master:
if not is_first_worker():
dist.barrier()
return None
for i in range(
args.world_size): # TODO: dynamically find the max instead of HardCode
pickle_path = os.path.join(
output_path,
"{1}_data_obj_{0}.pb".format(
str(i),
prefix))
try:
with open(pickle_path, 'rb') as handle:
b = pickle.load(handle)
data_list.append(b)
except BaseException:
continue
data_array_agg = np.concatenate(data_list, axis=merge_axis)
dist.barrier()
return data_array_agg
def pad_input_ids(input_ids, max_length,
pad_on_left=False,
pad_token=0):
padding_length = max_length - len(input_ids)
padding_id = [pad_token] * padding_length
if padding_length <= 0:
input_ids = input_ids[:max_length]
else:
if pad_on_left:
input_ids = padding_id + input_ids
else:
input_ids = input_ids + padding_id
return input_ids
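# Illustrative sketch (not part of the original module): right-padding with the
# default pad token. The helper name is hypothetical.
def _sketch_pad_input_ids():
    return pad_input_ids([101, 2054, 102], max_length=6)  # [101, 2054, 102, 0, 0, 0]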
def pad_ids(input_ids, attention_mask, token_type_ids, max_length,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True):
padding_length = max_length - len(input_ids)
padding_id = [pad_token] * padding_length
padding_type = [pad_token_segment_id] * padding_length
padding_attention = [0 if mask_padding_with_zero else 1] * padding_length
if padding_length <= 0:
input_ids = input_ids[:max_length]
attention_mask = attention_mask[:max_length]
token_type_ids = token_type_ids[:max_length]
else:
if pad_on_left:
input_ids = padding_id + input_ids
attention_mask = padding_attention + attention_mask
token_type_ids = padding_type + token_type_ids
else:
input_ids = input_ids + padding_id
attention_mask = attention_mask + padding_attention
token_type_ids = token_type_ids + padding_type
return input_ids, attention_mask, token_type_ids
# to reuse pytrec_eval, id must be string
def convert_to_string_id(result_dict):
string_id_dict = {}
# format [string, dict[string, val]]
for k, v in result_dict.items():
_temp_v = {}
for inner_k, inner_v in v.items():
_temp_v[str(inner_k)] = inner_v
string_id_dict[str(k)] = _temp_v
return string_id_dict
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def is_first_worker():
return not dist.is_available() or not dist.is_initialized() or dist.get_rank() == 0
def concat_key(all_list, key, axis=0):
return np.concatenate([ele[key] for ele in all_list], axis=axis)
def get_checkpoint_no(checkpoint_path):
nums = re.findall(r'\d+', checkpoint_path)
return int(nums[-1]) if len(nums) > 0 else 0
def get_latest_ann_data(ann_data_path):
ANN_PREFIX = f"ann_training_data_"
if not os.path.exists(ann_data_path):
return -1, None, None
files = list(next(os.walk(ann_data_path))[2])
num_start_pos = len(ANN_PREFIX)
data_no_list = [int(s[num_start_pos:])
for s in files if s[:num_start_pos] == ANN_PREFIX]
if len(data_no_list) > 0:
data_no = max(data_no_list)
try:
with open(os.path.join(ann_data_path, "ann_ndcg_" + str(data_no)), 'r') as f:
ndcg_json = json.load(f)
except FileNotFoundError:
ndcg_json = None
        return data_no, os.path.join(
            ann_data_path, ANN_PREFIX + str(data_no)), ndcg_json
return -1, None, None
def numbered_byte_file_generator(base_path, file_no, record_size):
for i in range(file_no):
with open('{}_split{}'.format(base_path, i), 'rb') as f:
while True:
b = f.read(record_size)
if not b:
# eof
break
yield b
class EmbeddingCache:
def __init__(self, base_path, seed=-1):
self.base_path = base_path
with open(base_path + '_meta', 'r') as f:
meta = json.load(f)
self.dtype = np.dtype(meta['type'])
self.total_number = meta['total_number']
self.record_size = int(
meta['embedding_size']) * self.dtype.itemsize + 4
if seed >= 0:
self.ix_array = np.random.RandomState(
seed).permutation(self.total_number)
else:
self.ix_array = np.arange(self.total_number)
self.f = None
def open(self):
self.f = open(self.base_path, 'rb')
def close(self):
self.f.close()
def read_single_record(self):
record_bytes = self.f.read(self.record_size)
passage_len = int.from_bytes(record_bytes[:4], 'big')
passage = np.frombuffer(record_bytes[4:], dtype=self.dtype)
return passage_len, passage
def __enter__(self):
self.open()
return self
def __exit__(self, type, value, traceback):
self.close()
def __getitem__(self, key):
        if key < 0 or key >= self.total_number:
raise IndexError(
"Index {} is out of bound for cached embeddings of size {}".format(
key, self.total_number))
self.f.seek(key * self.record_size)
return self.read_single_record()
def __iter__(self):
self.f.seek(0)
for i in range(self.total_number):
new_ix = self.ix_array[i]
yield self.__getitem__(new_ix)
def __len__(self):
return self.total_number
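# Illustrative sketch (not part of the original module): EmbeddingCache expects
# a '<base_path>_meta' JSON sidecar plus a binary file of fixed-size records,
# each a 4-byte big-endian length followed by `embedding_size` values of the
# declared dtype. This hypothetical helper writes a minimal 2-record cache so
# the on-disk layout is explicit.
def _sketch_write_embedding_cache(base_path):
    with open(base_path + '_meta', 'w') as f:
        json.dump({'type': 'int32', 'total_number': 2, 'embedding_size': 1}, f)
    with open(base_path, 'wb') as f:
        for v in (7, 9):
            f.write((1).to_bytes(4, 'big'))           # per-record length prefix
            f.write(np.int32(v).tobytes())            # the "embedding" payload
    with EmbeddingCache(base_path) as cache:
        return [cache[i] for i in range(len(cache))]  # [(1, array([7])), (1, array([9]))]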
class StreamingDataset(IterableDataset):
def __init__(self, elements, fn, distributed=True):
super().__init__()
self.elements = elements
self.fn = fn
        self.num_replicas = -1
self.distributed = distributed
def __iter__(self):
if dist.is_initialized():
self.num_replicas = dist.get_world_size()
self.rank = dist.get_rank()
else:
pass
# print("Not running in distributed mode")
for i, element in enumerate(self.elements):
if self.distributed and self.num_replicas != -1 and i % self.num_replicas != self.rank:
continue
records = self.fn(element, i)
for rec in records:
yield rec
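# Illustrative sketch (not part of the original module): outside distributed
# mode every element is processed, and `fn` may emit several records per
# element. The helper name is hypothetical.
def _sketch_streaming_dataset():
    sds = StreamingDataset(["a", "b"], lambda line, i: [(i, line)], distributed=False)
    return list(sds)  # [(0, 'a'), (1, 'b')]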
def tokenize_to_file(args, i, num_process, in_path, out_path, line_fn):
configObj = MSMarcoConfigDict[args.model_type]
tokenizer = configObj.tokenizer_class.from_pretrained(
args.model_name_or_path,
do_lower_case=True,
cache_dir=None,
)
with open(in_path, 'r', encoding='utf-8') if in_path[-2:] != "gz" else gzip.open(in_path, 'rt', encoding='utf8') as in_f,\
open('{}_split{}'.format(out_path, i), 'wb') as out_f:
for idx, line in enumerate(in_f):
if idx % num_process != i:
continue
out_f.write(line_fn(args, line, tokenizer))
def multi_file_process(args, num_process, in_path, out_path, line_fn):
processes = []
for i in range(num_process):
p = Process(
target=tokenize_to_file,
args=(
args,
i,
num_process,
in_path,
out_path,
line_fn,
))
processes.append(p)
p.start()
for p in processes:
p.join()
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
if not dist.is_initialized() or dist.get_world_size() == 1:
return [data]
world_size = dist.get_world_size()
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
| 12,596 | 29.354217 | 126 | py |
modir | modir-master/utils/msmarco_eval.py | """
This is the official eval script open-sourced on the MS MARCO site (not written or owned by us).
This module computes evaluation metrics for MSMARCO dataset on the ranking task.
Command line:
python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
Creation Date : 06/12/2018
Last Modified : 1/21/2019
Authors : Daniel Campos <[email protected]>, Rutger van Haasteren <[email protected]>
"""
import sys
import statistics
from collections import Counter
MaxMRRRank = 10
def load_reference_from_stream(f):
"""Load Reference reference relevant passages
Args:f (stream): stream to load.
Returns:qids_to_relevant_passageids (dict): dictionary mapping from query_id (int) to relevant passages (list of ints).
"""
qids_to_relevant_passageids = {}
for l in f:
try:
l = l.strip().split('\t')
qid = int(l[0])
if qid in qids_to_relevant_passageids:
pass
else:
qids_to_relevant_passageids[qid] = []
qids_to_relevant_passageids[qid].append(int(l[2]))
        except (ValueError, IndexError):
            raise IOError('"%s" is not in a valid format' % l)
return qids_to_relevant_passageids
def load_reference(path_to_reference):
"""Load Reference reference relevant passages
Args:path_to_reference (str): path to a file to load.
Returns:qids_to_relevant_passageids (dict): dictionary mapping from query_id (int) to relevant passages (list of ints).
"""
with open(path_to_reference,'r') as f:
qids_to_relevant_passageids = load_reference_from_stream(f)
return qids_to_relevant_passageids
def load_candidate_from_stream(f):
"""Load candidate data from a stream.
Args:f (stream): stream to load.
Returns:qid_to_ranked_candidate_passages (dict): dictionary mapping from query_id (int) to a list of 1000 passage ids(int) ranked by relevance and importance
"""
qid_to_ranked_candidate_passages = {}
for l in f:
try:
l = l.strip().split('\t')
qid = int(l[0])
pid = int(l[1])
rank = int(l[2])
if qid in qid_to_ranked_candidate_passages:
pass
else:
# By default, all PIDs in the list of 1000 are 0. Only override those that are given
tmp = [0] * 1000
qid_to_ranked_candidate_passages[qid] = tmp
qid_to_ranked_candidate_passages[qid][rank-1]=pid
        except (ValueError, IndexError):
            raise IOError('"%s" is not in a valid format' % l)
return qid_to_ranked_candidate_passages
def load_candidate(path_to_candidate):
"""Load candidate data from a file.
Args:path_to_candidate (str): path to file to load.
Returns:qid_to_ranked_candidate_passages (dict): dictionary mapping from query_id (int) to a list of 1000 passage ids(int) ranked by relevance and importance
"""
with open(path_to_candidate,'r') as f:
qid_to_ranked_candidate_passages = load_candidate_from_stream(f)
return qid_to_ranked_candidate_passages
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
"""Perform quality checks on the dictionaries
Args:
p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping
Dict as read in with load_reference or load_reference_from_stream
p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates
Returns:
bool,str: Boolean whether allowed, message to be shown in case of a problem
"""
message = ''
allowed = True
# Create sets of the QIDs for the submitted and reference queries
candidate_set = set(qids_to_ranked_candidate_passages.keys())
ref_set = set(qids_to_relevant_passageids.keys())
# Check that we do not have multiple passages per query
for qid in qids_to_ranked_candidate_passages:
# Remove all zeros from the candidates
duplicate_pids = set([item for item, count in Counter(qids_to_ranked_candidate_passages[qid]).items() if count > 1])
if len(duplicate_pids-set([0])) > 0:
message = "Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}".format(
qid=qid, pid=list(duplicate_pids)[0])
allowed = False
return allowed, message
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
"""Compute MRR metric
Args:
p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping
Dict as read in with load_reference or load_reference_from_stream
p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates
Returns:
dict: dictionary of metrics {'MRR': <MRR Score>}
"""
all_scores = {}
MRR = 0
qids_with_relevant_passages = 0
ranking = []
for qid in qids_to_ranked_candidate_passages:
if qid in qids_to_relevant_passageids:
ranking.append(0)
target_pid = qids_to_relevant_passageids[qid]
candidate_pid = qids_to_ranked_candidate_passages[qid]
for i in range(0,MaxMRRRank):
if candidate_pid[i] in target_pid:
MRR += 1/(i + 1)
ranking.pop()
ranking.append(i+1)
break
if len(ranking) == 0:
raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")
MRR = MRR/len(qids_to_relevant_passageids)
all_scores['MRR @10'] = MRR
all_scores['QueriesRanked'] = len(qids_to_ranked_candidate_passages)
return all_scores
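# Illustrative sketch (not part of the original script): a one-query worked
# example. The relevant passage lands at rank 2, so MRR@10 is 1/2.
# The helper name is hypothetical.
def _sketch_compute_metrics():
    ref = {1: [100]}                   # query 1 -> relevant passage 100
    cand = {1: [3, 100] + [0] * 998}   # ranked candidates, zero-padded
    return compute_metrics(ref, cand)  # {'MRR @10': 0.5, 'QueriesRanked': 1}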
def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True):
"""Compute MRR metric
Args:
p_path_to_reference_file (str): path to reference file.
Reference file should contain lines in the following format:
QUERYID\tPASSAGEID
Where PASSAGEID is a relevant passage for a query. Note QUERYID can repeat on different lines with different PASSAGEIDs
p_path_to_candidate_file (str): path to candidate file.
            Candidate file should contain lines in the following format:
QUERYID\tPASSAGEID1\tRank
If a user wishes to use the TREC format please run the script with a -t flag at the end. If this flag is used the expected format is
QUERYID\tITER\tDOCNO\tRANK\tSIM\tRUNID
Where the values are separated by tabs and ranked in order of relevance
Returns:
dict: dictionary of metrics {'MRR': <MRR Score>}
"""
qids_to_relevant_passageids = load_reference(path_to_reference)
qids_to_ranked_candidate_passages = load_candidate(path_to_candidate)
if perform_checks:
allowed, message = quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages)
if message != '': print(message)
return compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages)
def main():
"""Command line:
python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
"""
print("Eval Started")
if len(sys.argv) == 3:
path_to_reference = sys.argv[1]
path_to_candidate = sys.argv[2]
metrics = compute_metrics_from_files(path_to_reference, path_to_candidate)
print('#####################')
for metric in sorted(metrics):
print('{}: {}'.format(metric, metrics[metric]))
print('#####################')
else:
print('Usage: msmarco_eval_ranking.py <reference ranking> <candidate ranking>')
exit()
if __name__ == '__main__':
main() | 7,724 | 40.756757 | 161 | py |
modir | modir-master/utils/lamb.py | """Lamb optimizer."""
import collections
import math
import torch
from tensorboardX import SummaryWriter
from torch.optim import Optimizer
def log_lamb_rs(optimizer: Optimizer, event_writer: SummaryWriter, token_count: int):
"""Log a histogram of trust ratio scalars in across layers."""
results = collections.defaultdict(list)
for group in optimizer.param_groups:
for p in group['params']:
state = optimizer.state[p]
for i in ('weight_norm', 'adam_norm', 'trust_ratio'):
if i in state:
results[i].append(state[i])
for k, v in results.items():
event_writer.add_histogram(f'lamb/{k}', torch.tensor(v), token_count)
class Lamb(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
adam (bool, optional): always use trust ratio = 1, which turns this into
Adam. Useful for comparison purposes.
.. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0, adam=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
self.adam = adam
super(Lamb, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# Paper v3 does not use debiasing.
# Apply bias to lr to avoid broadcast.
step_size = group['lr'] # * math.sqrt(bias_correction2) / bias_correction1
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
if group['weight_decay'] != 0:
adam_step.add_(group['weight_decay'], p.data)
adam_norm = adam_step.pow(2).sum().sqrt()
if weight_norm == 0 or adam_norm == 0:
trust_ratio = 1
else:
trust_ratio = weight_norm / adam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = adam_norm
state['trust_ratio'] = trust_ratio
if self.adam:
trust_ratio = 1
p.data.add_(-step_size * trust_ratio, adam_step)
return loss
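# Illustrative sketch (not part of the original module): one Lamb step on a toy
# layer; after step(), the per-parameter state exposes the layer-wise trust
# ratio that distinguishes Lamb from plain Adam. The helper name is
# hypothetical.
def _sketch_lamb_step():
    layer = torch.nn.Linear(4, 1)
    opt = Lamb(layer.parameters(), lr=1e-3, weight_decay=0.01)
    layer(torch.randn(8, 4)).pow(2).mean().backward()
    opt.step()
    return opt.state[layer.weight]['trust_ratio']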
| 4,887 | 38.419355 | 109 | py |
modir | modir-master/data/process_fn.py | import torch
def pad_ids(input_ids, attention_mask, token_type_ids, max_length, pad_token, mask_padding_with_zero, pad_token_segment_id, pad_on_left=False):
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1]
* padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] *
padding_length) + token_type_ids
else:
input_ids += [pad_token] * padding_length
attention_mask += [0 if mask_padding_with_zero else 1] * padding_length
token_type_ids += [pad_token_segment_id] * padding_length
return input_ids, attention_mask, token_type_ids
def dual_process_fn(line, i, tokenizer, args):
features = []
cells = line.split("\t")
if len(cells) == 2:
# this is for training and validation
# id, passage = line
mask_padding_with_zero = True
pad_token_segment_id = 0
pad_on_left = False
text = cells[1].strip()
input_id_a = tokenizer.encode(
text, add_special_tokens=True, max_length=args.max_seq_length,)
token_type_ids_a = [0] * len(input_id_a)
attention_mask_a = [
1 if mask_padding_with_zero else 0] * len(input_id_a)
input_id_a, attention_mask_a, token_type_ids_a = pad_ids(
input_id_a, attention_mask_a, token_type_ids_a, args.max_seq_length, tokenizer.pad_token_id, mask_padding_with_zero, pad_token_segment_id, pad_on_left)
features += [torch.tensor(input_id_a, dtype=torch.int), torch.tensor(
attention_mask_a, dtype=torch.bool), torch.tensor(token_type_ids_a, dtype=torch.uint8)]
qid = int(cells[0])
features.append(qid)
else:
raise Exception(
"Line doesn't have correct length: {0}. Expected 2.".format(str(len(cells))))
return [features]
def triple_process_fn(line, i, tokenizer, args):
features = []
cells = line.split("\t")
if len(cells) == 3:
# this is for training and validation
# query, positive_passage, negative_passage = line
mask_padding_with_zero = True
pad_token_segment_id = 0
pad_on_left = False
for j, text in enumerate(cells):
max_len = args.max_query_length if j==0 else args.max_seq_length
input_id_a = tokenizer.encode(
text.strip(), add_special_tokens=True, max_length=max_len,)
token_type_ids_a = [0] * len(input_id_a)
attention_mask_a = [
1 if mask_padding_with_zero else 0] * len(input_id_a)
input_id_a, attention_mask_a, token_type_ids_a = pad_ids(
input_id_a, attention_mask_a, token_type_ids_a, max_len, tokenizer.pad_token_id, mask_padding_with_zero, pad_token_segment_id, pad_on_left)
features += [torch.tensor(input_id_a, dtype=torch.int),
torch.tensor(attention_mask_a, dtype=torch.bool)]
else:
raise Exception(
"Line doesn't have correct length: {0}. Expected 3.".format(str(len(cells))))
return [features]
def triple2dual_process_fn(line, i, tokenizer, args):
ret = []
cells = line.split("\t")
if len(cells) == 3:
# this is for training and validation
# query, positive_passage, negative_passage = line
# return 2 entries per line, 1 pos + 1 neg
mask_padding_with_zero = True
pad_token_segment_id = 0
pad_on_left = False
pos_feats = []
neg_feats = []
for i, text in enumerate(cells):
input_id_a = tokenizer.encode(
text.strip(), add_special_tokens=True, max_length=args.max_seq_length,)
token_type_ids_a = [0] * len(input_id_a)
attention_mask_a = [
1 if mask_padding_with_zero else 0] * len(input_id_a)
input_id_a, attention_mask_a, token_type_ids_a = pad_ids(
input_id_a, attention_mask_a, token_type_ids_a, args.max_seq_length, tokenizer.pad_token_id, mask_padding_with_zero, pad_token_segment_id, pad_on_left)
if i == 0:
pos_feats += [torch.tensor(input_id_a, dtype=torch.int),
torch.tensor(attention_mask_a, dtype=torch.bool)]
neg_feats += [torch.tensor(input_id_a, dtype=torch.int),
torch.tensor(attention_mask_a, dtype=torch.bool)]
elif i == 1:
pos_feats += [torch.tensor(input_id_a, dtype=torch.int),
torch.tensor(attention_mask_a, dtype=torch.bool), 1]
else:
neg_feats += [torch.tensor(input_id_a, dtype=torch.int),
torch.tensor(attention_mask_a, dtype=torch.bool), 0]
ret = [pos_feats, neg_feats]
else:
raise Exception(
"Line doesn't have correct length: {0}. Expected 3.".format(str(len(cells))))
return ret
| 5,071 | 43.884956 | 167 | py |
modir | modir-master/data/msmarco_data.py | import sys
import os
import torch
sys.path += ['../']
import gzip
import pickle
from utils.util import pad_input_ids, multi_file_process, numbered_byte_file_generator, EmbeddingCache
import csv
from model.models import MSMarcoConfigDict, ALL_MODELS
from torch.utils.data import DataLoader, Dataset, TensorDataset, IterableDataset, get_worker_info
import numpy as np
from os import listdir
from os.path import isfile, join
import argparse
import json
def write_query_rel(args, pid2offset, query_file, positive_id_file, out_query_file, out_id_file,
splits=32):
print(
"Writing query files " +
str(out_query_file) +
" and " +
str(out_id_file))
query_positive_id = set()
query_positive_id_path = os.path.join(
args.data_dir,
positive_id_file,
)
print("Loading query_2_pos_docid")
with gzip.open(query_positive_id_path, 'rt', encoding='utf8') if positive_id_file[-2:] == "gz" else open(query_positive_id_path, 'r', encoding='utf8') as f:
if args.data_type == 0:
tsvreader = csv.reader(f, delimiter=" ")
else:
tsvreader = csv.reader(f, delimiter="\t")
for [topicid, _, docid, rel] in tsvreader:
query_positive_id.add(int(topicid))
query_collection_path = os.path.join(
args.data_dir,
query_file,
)
out_query_path = os.path.join(
args.out_data_dir,
out_query_file,
)
qid2offset = {}
print('start query file split processing')
multi_file_process(
args,
splits,
query_collection_path,
out_query_path,
QueryPreprocessingFn)
print('start merging splits')
idx = 0
with open(out_query_path, 'wb') as f:
for record in numbered_byte_file_generator(
out_query_path, splits, 8 + 4 + args.max_query_length * 4):
q_id = int.from_bytes(record[:8], 'big')
if q_id not in query_positive_id:
# exclude the query as it is not in label set
continue
f.write(record[8:])
qid2offset[q_id] = idx
idx += 1
if idx < 3:
print(str(idx) + " " + str(q_id))
qid2offset_path = os.path.join(
args.out_data_dir,
"qid2offset.pickle",
)
with open(qid2offset_path, 'wb') as handle:
pickle.dump(qid2offset, handle, protocol=4)
print("done saving qid2offset")
print("Total lines written: " + str(idx))
meta = {'type': 'int32', 'total_number': idx,
'embedding_size': args.max_query_length}
with open(out_query_path + "_meta", 'w') as f:
json.dump(meta, f)
embedding_cache = EmbeddingCache(out_query_path)
print("First line")
with embedding_cache as emb:
print(emb[0])
out_id_path = os.path.join(
args.out_data_dir,
out_id_file,
)
print("Writing qrels")
with gzip.open(query_positive_id_path, 'rt', encoding='utf8') if positive_id_file[-2:] == "gz" else open(query_positive_id_path, 'r', encoding='utf8') as f, \
open(out_id_path, "w", encoding='utf-8') as out_id:
if args.data_type == 0:
tsvreader = csv.reader(f, delimiter=" ")
else:
tsvreader = csv.reader(f, delimiter="\t")
out_line_count = 0
for [topicid, _, docid, rel] in tsvreader:
topicid = int(topicid)
if args.data_type == 0:
docid = int(docid[1:])
else:
docid = int(docid)
out_id.write(str(qid2offset[topicid]) +
"\t" +
str(pid2offset[docid]) +
"\t" +
rel +
"\n")
out_line_count += 1
print("Total lines written: " + str(out_line_count))
def preprocess(args):
pid2offset = {}
if args.data_type == 0:
in_passage_path = os.path.join(
args.data_dir,
"msmarco-docs.tsv",
)
else:
in_passage_path = os.path.join(
args.data_dir,
"collection.tsv",
)
out_passage_path = os.path.join(
args.out_data_dir,
"passages",
)
if os.path.exists(out_passage_path):
print("preprocessed data already exist, exit preprocessing")
return
out_line_count = 0
print('start passage file split processing')
multi_file_process(
args,
32,
in_passage_path,
out_passage_path,
PassagePreprocessingFn)
print('start merging splits')
with open(out_passage_path, 'wb') as f:
for idx, record in enumerate(numbered_byte_file_generator(
out_passage_path, 32, 8 + 4 + args.max_seq_length * 4)):
p_id = int.from_bytes(record[:8], 'big')
f.write(record[8:])
pid2offset[p_id] = idx
if idx < 3:
print(str(idx) + " " + str(p_id))
out_line_count += 1
print("Total lines written: " + str(out_line_count))
meta = {
'type': 'int32',
'total_number': out_line_count,
'embedding_size': args.max_seq_length}
with open(out_passage_path + "_meta", 'w') as f:
json.dump(meta, f)
embedding_cache = EmbeddingCache(out_passage_path)
print("First line")
with embedding_cache as emb:
print(emb[0])
pid2offset_path = os.path.join(
args.out_data_dir,
"pid2offset.pickle",
)
with open(pid2offset_path, 'wb') as handle:
pickle.dump(pid2offset, handle, protocol=4)
print("done saving pid2offset")
if args.data_type == 0:
write_query_rel(
args,
pid2offset,
"msmarco-doctrain-queries.tsv",
"msmarco-doctrain-qrels.tsv",
"train-query",
"train-qrel.tsv")
write_query_rel(
args,
pid2offset,
"msmarco-test2019-queries.tsv",
"2019qrels-docs.txt",
"dev-query",
"dev-qrel.tsv")
else:
write_query_rel(
args,
pid2offset,
"queries.train.tsv",
"qrels.train.tsv",
"train-query",
"train-qrel.tsv")
write_query_rel(
args,
pid2offset,
"queries.dev.small.tsv",
"qrels.dev.small.tsv",
"dev-query",
"dev-qrel.tsv")
def preprocess_treccovid(args):
splits = 8
pid2offset = {}
if args.data_type == 0:
in_passage_path = os.path.join(
args.data_dir,
"msmarco-docs.tsv",
)
else:
in_passage_path = os.path.join(
args.data_dir,
"collection.tsv",
)
out_passage_path = os.path.join(
args.out_data_dir,
"passages",
)
if os.path.exists(out_passage_path):
print("preprocessed data already exist, exit preprocessing")
return
out_line_count = 0
print('start passage file split processing')
multi_file_process(
args,
splits,
in_passage_path,
out_passage_path,
PassagePreprocessingFn)
print('start merging splits')
with open(out_passage_path, 'wb') as f:
for idx, record in enumerate(numbered_byte_file_generator(
out_passage_path, splits, 8 + 4 + args.max_seq_length * 4)):
p_id = int.from_bytes(record[:8], 'big')
f.write(record[8:])
pid2offset[p_id] = idx
if idx < 3:
print(str(idx) + " " + str(p_id))
out_line_count += 1
print("Total lines written: " + str(out_line_count))
meta = {
'type': 'int32',
'total_number': out_line_count,
'embedding_size': args.max_seq_length}
with open(out_passage_path + "_meta", 'w') as f:
json.dump(meta, f)
embedding_cache = EmbeddingCache(out_passage_path)
print("First line")
with embedding_cache as emb:
print(emb[0])
pid2offset_path = os.path.join(
args.out_data_dir,
"pid2offset.pickle",
)
with open(pid2offset_path, 'wb') as handle:
pickle.dump(pid2offset, handle, protocol=4)
print("done saving pid2offset")
if args.data_type == 0:
write_query_rel(
args,
pid2offset,
"msmarco-doctrain-queries.tsv",
"msmarco-doctrain-qrels.tsv",
"train-query",
"train-qrel.tsv")
write_query_rel(
args,
pid2offset,
"msmarco-test2019-queries.tsv",
"2019qrels-docs.txt",
"dev-query",
"dev-qrel.tsv")
else:
write_query_rel(
args,
pid2offset,
"queries.tsv",
"qrels.tsv",
"train-query",
"train-qrel.tsv",
splits=splits)
        # ^ same input files as the dev split below, but emitted as the
        #   train split as well, since downstream steps may expect either.
write_query_rel(
args,
pid2offset,
"queries.tsv",
"qrels.tsv",
"dev-query",
"dev-qrel.tsv",
splits=splits)
def PassagePreprocessingFn(args, line, tokenizer):
if args.data_type == 0:
line_arr = line.split('\t')
p_id = int(line_arr[0][1:]) # remove "D"
url = line_arr[1].rstrip()
title = line_arr[2].rstrip()
p_text = line_arr[3].rstrip()
if not args.model_type == "seeddot_nll":
full_text = url + "<sep>" + title + "<sep>" + p_text
else:
full_text = url + " " + tokenizer.sep_token + " " + title + " " + tokenizer.sep_token+" " + p_text
# keep only first 10000 characters, should be sufficient for any
# experiment that uses less than 500 - 1k tokens
full_text = full_text[:args.max_doc_character]
else:
line = line.strip()
line_arr = line.split('\t')
p_id = int(line_arr[0])
p_text = line_arr[1].rstrip()
# keep only first 10000 characters, should be sufficient for any
# experiment that uses less than 500 - 1k tokens
full_text = p_text[:args.max_doc_character]
passage = tokenizer.encode(
full_text,
add_special_tokens=True,
max_length=args.max_seq_length,
)
passage_len = min(len(passage), args.max_seq_length)
if not args.model_type == "seeddot_nll":
input_id_b = pad_input_ids(passage, args.max_seq_length)
else:
input_id_b = pad_input_ids(passage, args.max_seq_length, pad_token=tokenizer.pad_token_id)
return p_id.to_bytes(8,'big') + passage_len.to_bytes(4,'big') + np.array(input_id_b,np.int32).tobytes()
def QueryPreprocessingFn(args, line, tokenizer):
line_arr = line.split('\t')
q_id = int(line_arr[0])
passage = tokenizer.encode(
line_arr[1].rstrip(),
add_special_tokens=True,
max_length=args.max_query_length)
passage_len = min(len(passage), args.max_query_length)
if not args.model_type == "seeddot_nll":
input_id_b = pad_input_ids(passage, args.max_query_length)
else:
input_id_b = pad_input_ids(passage, args.max_query_length, pad_token=tokenizer.pad_token_id)
return q_id.to_bytes(8,'big') + passage_len.to_bytes(4,'big') + np.array(input_id_b,np.int32).tobytes()
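# --- Illustrative sketch (not part of the original file) ---
# Both preprocessing functions above emit fixed-size binary records:
# 8 big-endian id bytes, 4 big-endian length bytes, then max-length int32
# token ids (native byte order, matching .tobytes()). A minimal decoder:
def _decode_record_demo(record, max_len):
    import numpy as np
    rec_id = int.from_bytes(record[:8], 'big')
    seq_len = int.from_bytes(record[8:12], 'big')
    token_ids = np.frombuffer(record[12:12 + 4 * max_len], dtype=np.int32)
    return rec_id, seq_len, token_ids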
def GetProcessingFn(args, query=False, tgd=False):
def fn(vals, i):
passage_len, passage = vals
if not query:
max_len = args.max_seq_length
else:
if tgd and args.max_query_length_tgd is not None:
max_len = args.max_query_length_tgd
else:
max_len = args.max_query_length
pad_len = max(0, max_len - passage_len)
token_type_ids = ([0] if query else [1]) * passage_len + [0] * pad_len
attention_mask = [1] * passage_len + [0] * pad_len
passage_collection = [(i, passage, attention_mask, token_type_ids)]
query2id_tensor = torch.tensor(
[f[0] for f in passage_collection], dtype=torch.long)
all_input_ids_a = torch.tensor(
[f[1] for f in passage_collection], dtype=torch.int)
all_attention_mask_a = torch.tensor(
[f[2] for f in passage_collection], dtype=torch.bool)
all_token_type_ids_a = torch.tensor(
[f[3] for f in passage_collection], dtype=torch.uint8)
dataset = TensorDataset(
all_input_ids_a,
all_attention_mask_a,
all_token_type_ids_a,
query2id_tensor)
return [ts for ts in dataset]
return fn
def GetTrainingDataProcessingFn(args, query_cache, passage_cache):
def fn(line, i):
line_arr = line.split('\t')
qid = int(line_arr[0])
pos_pid = int(line_arr[1])
neg_pids = line_arr[2].split(',')
neg_pids = [int(neg_pid) for neg_pid in neg_pids]
all_input_ids_a = []
all_attention_mask_a = []
query_data = GetProcessingFn(
args, query=True)(
query_cache[qid], qid)[0]
pos_data = GetProcessingFn(
args, query=False)(
passage_cache[pos_pid], pos_pid)[0]
pos_label = torch.tensor(1, dtype=torch.long)
neg_label = torch.tensor(0, dtype=torch.long)
for neg_pid in neg_pids:
neg_data = GetProcessingFn(
args, query=False)(
passage_cache[neg_pid], neg_pid)[0]
yield (query_data[0], query_data[1], query_data[2], pos_data[0], pos_data[1], pos_data[2], pos_label)
yield (query_data[0], query_data[1], query_data[2], neg_data[0], neg_data[1], neg_data[2], neg_label)
return fn
def GetTripletTrainingDataProcessingFn(args, query_cache, passage_cache, tgd=False):
def fn(line, i):
line_arr = line.split('\t')
qid = int(line_arr[0])
pos_pid = int(line_arr[1])
neg_pids = line_arr[2].split(',')
neg_pids = [int(neg_pid) for neg_pid in neg_pids]
all_input_ids_a = []
all_attention_mask_a = []
query_data = GetProcessingFn(
args, query=True, tgd=tgd)(
query_cache[qid], qid)[0]
pos_data = GetProcessingFn(
args, query=False)(
passage_cache[pos_pid], pos_pid)[0]
for neg_pid in neg_pids:
neg_data = GetProcessingFn(
args, query=False)(
passage_cache[neg_pid], neg_pid)[0]
yield (query_data[0], query_data[1], query_data[2], pos_data[0], pos_data[1], pos_data[2],
neg_data[0], neg_data[1], neg_data[2], qid)
return fn
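# --- Illustrative sketch (not part of the original file) ---
# The training files consumed by the two generators above are tab-separated
# triples "<qid>\t<pos_pid>\t<neg_pid_1>,<neg_pid_2>,...", and each negative
# yields one training tuple. A minimal parse of that line format:
def _parse_triplet_line_demo(line="12\t34\t56,78"):
    qid, pos_pid, negs = line.split('\t')
    return int(qid), int(pos_pid), [int(x) for x in negs.split(',')]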
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir",
)
parser.add_argument(
"--out_data_dir",
default=None,
type=str,
required=True,
help="The output data dir",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " +
", ".join(
MSMarcoConfigDict.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " +
", ".join(ALL_MODELS),
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_query_length",
default=64,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_doc_character",
default=10000,
type=int,
help="used before tokenizer to save tokenizer latency",
)
parser.add_argument(
"--data_type",
default=0,
type=int,
help="0 for doc, 1 for passage",
)
parser.add_argument(
"--beir_dataset",
action='store_true',
help="use the new preprocess function instead",
)
args = parser.parse_args()
return args
def main():
args = get_arguments()
if not os.path.exists(args.out_data_dir):
os.makedirs(args.out_data_dir)
if not args.beir_dataset:
preprocess(args)
else:
preprocess_treccovid(args)
if __name__ == '__main__':
main()
| 16,967 | 29.085106 | 162 | py |
modir | modir-master/data/DPR_data.py | from os.path import join
import sys
sys.path += ['../']
import argparse
import json
import os
import random
import numpy as np
import torch
from torch.utils.data import Dataset, TensorDataset
from model.models import MSMarcoConfigDict, ALL_MODELS
import csv
from utils.util import multi_file_process, numbered_byte_file_generator, EmbeddingCache
import pickle
def normalize_question(question: str) -> str:
if question[-1] == '?':
question = question[:-1]
return question
def write_qas_query(args, qas_file, out_query_file):
print("Writing qas query files " + str(out_query_file))
print("print",args.answer_dir,qas_file)
qas_path = os.path.join(
args.answer_dir,
qas_file,
)
out_query_path = os.path.join(
args.out_data_dir,
out_query_file ,
)
configObj = MSMarcoConfigDict[args.model_type]
tokenizer = configObj.tokenizer_class.from_pretrained(
args.model_name_or_path,
do_lower_case=True,
cache_dir=None,
)
qid = 0
with open(qas_path, "r", encoding="utf-8") as f, open(out_query_path, "wb") as out_query:
reader = csv.reader(f, delimiter='\t')
for row in reader:
question = normalize_question(row[0])
out_query.write(QueryPreprocessingFn(args, qid, question, tokenizer))
qid += 1
meta = {'type': 'int32', 'total_number': qid, 'embedding_size': args.max_seq_length}
with open(out_query_path + "_meta", 'w') as f:
json.dump(meta, f)
def write_query_rel(args, pid2offset, query_file, out_query_file, out_ann_file, out_train_file, passage_id_name="passage_id"):
print("Writing query files " + str(out_query_file) + " and " + str(out_ann_file))
query_path = os.path.join(
args.question_dir,
query_file,
)
with open(query_path, 'r', encoding="utf-8") as f:
data = json.load(f)
print('Aggregated data size: {}'.format(len(data)))
    data = [r for r in data if len(r['positive_ctxs']) > 0]
    print('Data size after dropping entries without positives: {}'.format(len(data)))
    data = [r for r in data if len(r['hard_negative_ctxs']) > 0]
    print('Data size after dropping entries without hard negatives: {}'.format(len(data)))
out_query_path = os.path.join(
args.out_data_dir,
out_query_file ,
)
out_ann_file = os.path.join(
args.out_data_dir,
out_ann_file ,
)
out_training_path = os.path.join(
args.out_data_dir,
out_train_file ,
)
qid = 0
configObj = MSMarcoConfigDict[args.model_type]
tokenizer = configObj.tokenizer_class.from_pretrained(
args.model_name_or_path,
do_lower_case=True,
cache_dir=None,
)
with open(out_query_path, "wb") as out_query, \
open(out_ann_file, "w", encoding='utf-8') as out_ann, \
open(out_training_path, "w", encoding='utf-8') as out_training:
for sample in data:
positive_ctxs = sample['positive_ctxs']
neg_ctxs = sample['hard_negative_ctxs']
question = normalize_question(sample['question'])
first_pos_pid = pid2offset[int(positive_ctxs[0][passage_id_name])]
neg_pids = [str(pid2offset[int(neg_ctx[passage_id_name])]) for neg_ctx in neg_ctxs]
out_ann.write("{}\t{}\t{}\n".format(qid, first_pos_pid, sample["answers"]))
out_training.write("{}\t{}\t{}\n".format(qid, first_pos_pid, ','.join(neg_pids)))
out_query.write(QueryPreprocessingFn(args, qid, question, tokenizer))
qid += 1
print("Total lines written: " + str(qid))
meta = {'type': 'int32', 'total_number': qid, 'embedding_size': args.max_seq_length}
with open(out_query_path + "_meta", 'w') as f:
json.dump(meta, f)
embedding_cache = EmbeddingCache(out_query_path)
print("First line")
with embedding_cache as emb:
print(emb[0])
def write_mapping(args, id2offset, out_name):
out_path = os.path.join(
args.out_data_dir,
out_name ,
)
with open(out_path, 'w') as f:
for item in id2offset.items():
f.write("{}\t{}\n".format(item[0], item[1]))
def load_mapping(data_dir, out_name):
out_path = os.path.join(
data_dir,
out_name ,
)
pid2offset = {}
offset2pid = {}
with open(out_path, 'r') as f:
for line in f.readlines():
line_arr = line.split('\t')
pid2offset[int(line_arr[0])] = int(line_arr[1])
offset2pid[int(line_arr[1])] = int(line_arr[0])
return pid2offset, offset2pid
def preprocess(args):
pid2offset = {}
in_passage_path = os.path.join(
args.wiki_dir,
"psgs_w100.tsv" ,
)
out_passage_path = os.path.join(
args.out_data_dir,
"passages" ,
)
if os.path.exists(out_passage_path):
print("preprocessed data already exist, exit preprocessing")
return
else:
out_line_count = 0
print('start passage file split processing')
multi_file_process(args, 32, in_passage_path, out_passage_path, PassagePreprocessingFn)
print('start merging splits')
with open(out_passage_path, 'wb') as f:
for idx, record in enumerate(numbered_byte_file_generator(out_passage_path, 32, 8 + 4 + args.max_seq_length * 4)):
p_id = int.from_bytes(record[:8], 'big')
f.write(record[8:])
pid2offset[p_id] = idx
if idx < 3:
print(str(idx) + " " + str(p_id))
out_line_count += 1
print("Total lines written: " + str(out_line_count))
meta = {'type': 'int32', 'total_number': out_line_count, 'embedding_size': args.max_seq_length}
with open(out_passage_path + "_meta", 'w') as f:
json.dump(meta, f)
write_mapping(args, pid2offset, "pid2offset")
embedding_cache = EmbeddingCache(out_passage_path)
print("First line")
with embedding_cache as emb:
print(emb[pid2offset[1]])
if args.data_type == 0:
write_query_rel(args, pid2offset, "nq-train.json", "train-query", "train-ann", "train-data")
elif args.data_type == 1:
write_query_rel(args, pid2offset, "trivia-train.json", "train-query", "train-ann", "train-data", "psg_id")
else:
# use both training dataset and merge them
write_query_rel(args, pid2offset, "nq-train.json", "train-query-nq", "train-ann-nq", "train-data-nq")
write_query_rel(args, pid2offset, "trivia-train.json", "train-query-trivia", "train-ann-trivia", "train-data-trivia", "psg_id")
with open(args.out_data_dir + "train-query-nq", "rb") as nq_query, \
open(args.out_data_dir + "train-query-trivia", "rb") as trivia_query, \
open(args.out_data_dir + "train-query", "wb") as out_query:
out_query.write(nq_query.read())
out_query.write(trivia_query.read())
with open(args.out_data_dir + "train-query-nq_meta", "r", encoding='utf-8') as nq_query, \
open(args.out_data_dir + "train-query-trivia_meta", "r", encoding='utf-8') as trivia_query, \
open(args.out_data_dir + "train-query_meta", "w", encoding='utf-8') as out_query:
a = json.load(nq_query)
b = json.load(trivia_query)
meta = {'type': 'int32', 'total_number': a['total_number'] + b['total_number'], 'embedding_size': args.max_seq_length}
json.dump(meta, out_query)
embedding_cache = EmbeddingCache(args.out_data_dir + "train-query")
print("First line after merge")
with embedding_cache as emb:
print(emb[58812])
with open(args.out_data_dir + "train-ann-nq", "r", encoding='utf-8') as nq_ann, \
open(args.out_data_dir + "train-ann-trivia", "r", encoding='utf-8') as trivia_ann, \
open(args.out_data_dir + "train-ann", "w", encoding='utf-8') as out_ann:
out_ann.writelines(nq_ann.readlines())
out_ann.writelines(trivia_ann.readlines())
write_query_rel(args, pid2offset, "nq-dev.json", "dev-query", "dev-ann", "dev-data")
write_query_rel(args, pid2offset, "trivia-dev.json", "dev-query-trivia", "dev-ann-trivia", "dev-data-trivia", "psg_id")
write_qas_query(args, "nq-test.csv", "test-query")
write_qas_query(args, "trivia-test.csv", "trivia-test-query")
def PassagePreprocessingFn(args, line, tokenizer):
line_arr = list(csv.reader([line], delimiter='\t'))[0]
if line_arr[0] == 'id':
return bytearray()
p_id = int(line_arr[0])
text = line_arr[1]
title = line_arr[2]
token_ids = tokenizer.encode(title, text_pair=text, add_special_tokens=True,
max_length=args.max_seq_length,
pad_to_max_length=False)
seq_len = args.max_seq_length
passage_len = len(token_ids)
if len(token_ids) < seq_len:
token_ids = token_ids + [tokenizer.pad_token_id] * (seq_len - len(token_ids))
if len(token_ids) > seq_len:
token_ids = token_ids[0:seq_len]
token_ids[-1] = tokenizer.sep_token_id
if p_id < 5:
a = np.array(token_ids, np.int32)
print("pid {}, passagelen {}, shape {}".format(p_id, passage_len, a.shape))
return p_id.to_bytes(8, 'big') + passage_len.to_bytes(4, 'big') + np.array(token_ids, np.int32).tobytes()
def QueryPreprocessingFn(args, qid, text, tokenizer):
token_ids = tokenizer.encode(text, add_special_tokens=True, max_length=args.max_seq_length,
pad_to_max_length=False)
seq_len = args.max_seq_length
passage_len = len(token_ids)
if len(token_ids) < seq_len:
token_ids = token_ids + [tokenizer.pad_token_id] * (seq_len - len(token_ids))
if len(token_ids) > seq_len:
token_ids = token_ids[0:seq_len]
token_ids[-1] = tokenizer.sep_token_id
if qid < 5:
a = np.array(token_ids, np.int32)
print("qid {}, passagelen {}, shape {}".format(qid, passage_len, a.shape))
return passage_len.to_bytes(4, 'big') + np.array(token_ids, np.int32).tobytes()
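# --- Illustrative sketch (not part of the original file) ---
# Unlike the MS MARCO preprocessor, DPR query records carry no id prefix:
# each record is 4 big-endian length bytes followed by max_seq_length int32
# token ids. A minimal decoder for one such record:
def _decode_query_record_demo(record, max_seq_length):
    import numpy as np
    passage_len = int.from_bytes(record[:4], 'big')
    token_ids = np.frombuffer(record[4:4 + 4 * max_seq_length], dtype=np.int32)
    return passage_len, token_ids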
def GetProcessingFn(args, query=False):
def fn(vals, i):
passage_len, passage = vals
max_len = args.max_seq_length
pad_len = max(0, max_len - passage_len)
token_type_ids = [0] * passage_len + [0] * pad_len
attention_mask = passage != 0
passage_collection = [(i, passage, attention_mask, token_type_ids)]
query2id_tensor = torch.tensor([f[0] for f in passage_collection], dtype=torch.long)
all_input_ids_a = torch.tensor([f[1] for f in passage_collection], dtype=torch.int)
all_attention_mask_a = torch.tensor([f[2] for f in passage_collection], dtype=torch.bool)
all_token_type_ids_a = torch.tensor([f[3] for f in passage_collection], dtype=torch.uint8)
dataset = TensorDataset(all_input_ids_a, all_attention_mask_a, all_token_type_ids_a, query2id_tensor)
return [ts for ts in dataset]
return fn
def GetTrainingDataProcessingFn(args, query_cache, passage_cache, shuffle=True):
def fn(line, i):
line_arr = line.split('\t')
qid = int(line_arr[0])
pos_pid = int(line_arr[1])
neg_pids = line_arr[2].split(',')
neg_pids = [int(neg_pid) for neg_pid in neg_pids]
all_input_ids_a = []
all_attention_mask_a = []
query_data = GetProcessingFn(args, query=True)(query_cache[qid], qid)[0]
pos_data = GetProcessingFn(args, query=False)(passage_cache[pos_pid], pos_pid)[0]
if shuffle:
random.shuffle(neg_pids)
neg_data = GetProcessingFn(args, query=False)(passage_cache[neg_pids[0]], neg_pids[0])[0]
yield (query_data[0], query_data[1], query_data[2], pos_data[0], pos_data[1], pos_data[2])
yield (query_data[0], query_data[1], query_data[2], neg_data[0], neg_data[1], neg_data[2])
return fn
def GetTripletTrainingDataProcessingFn(args, query_cache, passage_cache, shuffle=True):
def fn(line, i):
line_arr = line.split('\t')
qid = int(line_arr[0])
pos_pid = int(line_arr[1])
neg_pids = line_arr[2].split(',')
neg_pids = [int(neg_pid) for neg_pid in neg_pids]
all_input_ids_a = []
all_attention_mask_a = []
query_data = GetProcessingFn(args, query=True)(query_cache[qid], qid)[0]
pos_data = GetProcessingFn(args, query=False)(passage_cache[pos_pid], pos_pid)[0]
if shuffle:
random.shuffle(neg_pids)
neg_data = GetProcessingFn(args, query=False)(passage_cache[neg_pids[0]], neg_pids[0])[0]
yield (query_data[0], query_data[1], query_data[2], pos_data[0], pos_data[1], pos_data[2],
neg_data[0], neg_data[1], neg_data[2])
return fn
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--out_data_dir",
default="/webdata-nfs/jialliu/dpr/ann/ann_multi_data_256/",
type=str,
help="The output data dir",
)
parser.add_argument(
"--model_type",
default="dpr",
type=str,
help="Model type selected in the list: " + ", ".join(MSMarcoConfigDict.keys()),
)
parser.add_argument(
"--model_name_or_path",
default="bert-base-uncased",
type=str,
help="Path to pre-trained model or shortcut name selected in the list: " +
", ".join(ALL_MODELS),
)
parser.add_argument(
"--max_seq_length",
default=256,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--data_type",
default=0,
type=int,
help="0 is nq, 1 is trivia, 2 is both",
)
parser.add_argument(
"--question_dir",
type=str,
help="location of the raw QnA question data",
)
parser.add_argument(
"--wiki_dir",
type=str,
help="location of the wiki corpus",
)
parser.add_argument(
"--answer_dir",
type=str,
help="location of the QnA answers for evaluation",
)
args = parser.parse_args()
if not os.path.exists(args.out_data_dir):
os.makedirs(args.out_data_dir)
preprocess(args)
if __name__ == '__main__':
main()
| 14,512 | 34.923267 | 135 | py |
modir | modir-master/data/filter_train_qrel.py | import sys
# Clamp every positive relevance label in a tab-separated qrels file to 1,
# rewriting the file in place (e.g. "q1\t0\td7\t2" -> "q1\t0\td7\t1").
fname = sys.argv[1]
qrel = []
with open(fname) as fin:
for line in fin:
a = line.strip().split('\t')
if int(a[-1]) > 0:
a[-1] = '1'
qrel.append('\t'.join(a))
with open(fname, 'w') as fout:
for line in qrel:
print(line, file=fout)
| 300 | 17.8125 | 37 | py |
modir | modir-master/model/domain_classifier.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
class DomainClassifier(nn.Module):
def __init__(self,
args,
input_size=768,
n_class=2):
super(DomainClassifier, self).__init__()
if args.dc_layers == 1:
layers = [
nn.Linear(input_size, n_class, bias=False)
]
elif args.dc_layers == 2:
layers = [
nn.Linear(input_size, 200),
nn.ReLU(),
nn.Linear(200, n_class, bias=False)
]
elif args.dc_layers == 3:
layers = [
nn.Linear(input_size, 200),
nn.ReLU(),
nn.Linear(200, 200),
nn.ReLU(),
nn.Linear(200, n_class, bias=False)
]
else:
raise NotImplementedError()
self.layers = nn.ModuleList(layers)
def forward(self, inputs, labels=None):
"""
        it doesn't work for run_warmup_da.py for now,
        since it no longer supports lamb and gradient reversal
"""
x = inputs
for layer in self.layers:
x = layer(x)
logits = torch.clamp(x, min=-5.0, max=5.0)
if labels is None:
return logits
elif type(labels) is str:
assert labels == 'uniform'
return (
logits,
self.uniform_loss(logits),
None,
)
else:
return (
logits,
F.cross_entropy(logits, labels),
self.get_acc(logits, labels)
)
@staticmethod
def uniform_loss(logits):
batch_size = logits.shape[0]
device = logits.device
return (
F.cross_entropy(logits, torch.tensor([0] * batch_size, device=device)) + \
F.cross_entropy(logits, torch.tensor([1] * batch_size, device=device))
) / 2
@staticmethod
def get_acc(logits, labels):
preds = torch.argmax(logits, dim=1)
total = int(len(labels))
correct = int(sum(labels==preds))
return (total, correct, correct/total)
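# --- Illustrative sketch (not part of the original file) ---
# Minimal usage of DomainClassifier, assuming only that args carries the
# dc_layers field read by __init__ above. With tensor labels the forward
# returns (logits, cross-entropy loss, accuracy); with labels='uniform' the
# loss instead pushes the logits toward an even split over both domains.
def _domain_classifier_demo():
    import argparse
    args = argparse.Namespace(dc_layers=2)
    model = DomainClassifier(args, input_size=768, n_class=2)
    x = torch.randn(8, 768)
    labels = torch.randint(0, 2, (8,))
    logits, loss, acc = model(x, labels)  # acc is (total, correct, ratio)
    return loss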
class DummyModule(nn.Module):
def __init__(self, *args, **kwargs):
super(DummyModule, self).__init__()
self.register_parameter(name='dummy', param=nn.Parameter(torch.randn(1)))
def forward(self, inputs, *args, **kwargs):
pass
def dry_test(model, device, test_dataset):
test_dataloader = DataLoader(test_dataset, batch_size=64, shuffle=True)
n_datasets = 2
total, correct = 0, 0
class_total, class_correct = [0 for _ in range(n_datasets)], [0 for _ in range(n_datasets)]
for batch_idx, batch in enumerate(test_dataloader):
inputs, labels = batch[0].to(device), batch[1].to(device)
outputs = model(inputs)
preds = torch.argmax(outputs, dim=1)
total += len(labels)
correct += sum(labels==preds)
for class_id in range(n_datasets):
class_total[class_id] += sum(labels==class_id)
class_correct[class_id] += sum(torch.logical_and(labels==class_id, preds==class_id))
result_dict = {'total_acc': int(correct)/total}
for class_id in range(n_datasets):
result_dict[f'class {class_id} acc'] = int(class_correct[class_id]) / int(class_total[class_id])
return {k: f'{v:.5f}' for k, v in result_dict.items()}
def dry_dc_evaluation(args, dc_model, query_embs, passage_embs,
prev_dc_state_dict):
# we take all queries from both domains
# and discard passages from one of the domains
# so that each domain has the same number of vectors (query+passage)
single_domain_query_size = min([x.shape[0] for x in query_embs])
single_domain_passage_size = min([x.shape[0] for x in passage_embs])
srd_query = query_embs[0][:single_domain_query_size]
srd_passage = passage_embs[0][:single_domain_passage_size]
tgd_query = np.concatenate([x[:single_domain_query_size] for x in query_embs[1:]])
tgd_passage = np.concatenate([x[:single_domain_passage_size] for x in passage_embs[1:]])
train_ratio = 0.7
srd_query_train_size = int(train_ratio * single_domain_query_size)
srd_passage_train_size = int(train_ratio * single_domain_passage_size)
tgd_query_train_size = int(train_ratio * single_domain_query_size) * (len(query_embs) - 1)
tgd_passage_train_size = int(train_ratio * single_domain_passage_size) * (len(query_embs) - 1)
train_query_dataset = TensorDataset(
torch.tensor(np.concatenate(
[srd_query[:srd_query_train_size],
tgd_query[:tgd_query_train_size]]
)),
torch.tensor(np.concatenate(
[[0] * srd_query_train_size,
[1] * tgd_query_train_size]
))
)
train_passage_dataset = TensorDataset(
torch.tensor(np.concatenate(
[srd_passage[:srd_passage_train_size],
tgd_passage[:tgd_passage_train_size]]
)),
torch.tensor(np.concatenate(
[[0] * srd_passage_train_size,
[1] * tgd_passage_train_size]
))
)
srd_query_test_size = single_domain_query_size - srd_query_train_size
srd_passage_test_size = single_domain_passage_size - srd_passage_train_size
tgd_query_test_size = single_domain_query_size * (len(query_embs) - 1) - tgd_query_train_size
tgd_passage_test_size = single_domain_passage_size * (len(query_embs) - 1) - tgd_passage_train_size
test_query_dataset = TensorDataset(
torch.tensor(np.concatenate(
[srd_query[srd_query_train_size:],
tgd_query[tgd_query_train_size:]]
)),
torch.tensor(np.concatenate(
[[0] * srd_query_test_size,
[1] * tgd_query_test_size]
))
)
test_passage_dataset = TensorDataset(
torch.tensor(np.concatenate(
[srd_passage[srd_passage_train_size:],
tgd_passage[tgd_passage_train_size:]]
)),
torch.tensor(np.concatenate(
[[0] * srd_passage_test_size,
[1] * tgd_passage_test_size]
))
)
if prev_dc_state_dict is not None:
prev_dc_model = DomainClassifier(args)
prev_dc_model.to(args.device)
prev_dc_model.load_state_dict(prev_dc_state_dict)
prev_test_query_results = dry_test(prev_dc_model, args.device, test_query_dataset)
prev_test_passage_results = dry_test(prev_dc_model, args.device, test_passage_dataset)
else:
prev_test_query_results = {'total_acc': None}
prev_test_passage_results = {'total_acc': None}
optimizer = torch.optim.Adam(dc_model.parameters(), lr=5e-4)
# if args.fp16:
# try:
# from apex import amp
# except ImportError:
# raise ImportError(
# "Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
# dc_model, optimizer = amp.initialize(
# dc_model, optimizer, opt_level=args.fp16_opt_level)
step = 0
    total_step = 500  # each step consumes one query batch and one passage batch
train_query_dataloader = DataLoader(train_query_dataset, batch_size=48, shuffle=True)
train_passage_dataloader = DataLoader(train_passage_dataset, batch_size=48, shuffle=True)
query_iterator = iter(train_query_dataloader)
passage_iterator = iter(train_passage_dataloader)
while step < total_step:
try:
query_batch = next(query_iterator)
except StopIteration:
query_iterator = iter(train_query_dataloader)
query_batch = next(query_iterator)
try:
passage_batch = next(passage_iterator)
except StopIteration:
passage_iterator = iter(train_passage_dataloader)
passage_batch = next(passage_iterator)
step += 1
for batch in [query_batch, passage_batch]:
inputs, labels = batch[0].to(args.device), batch[1].to(args.device)
outputs = dc_model(inputs)
optimizer.zero_grad()
loss = F.cross_entropy(outputs, labels)
# if args.fp16:
# with amp.scale_loss(loss, optimizer) as scaled_loss:
# scaled_loss.backward()
# else:
loss.backward()
optimizer.step()
test_query_results = dry_test(dc_model, args.device, test_query_dataset)
test_passage_results = dry_test(dc_model, args.device, test_passage_dataset)
return (
[test_query_results['total_acc'], test_passage_results['total_acc']],
[prev_test_query_results['total_acc'], prev_test_passage_results['total_acc']],
dc_model.state_dict()
)
| 8,906 | 37.227468 | 104 | py |
modir | modir-master/model/models.py | import sys
sys.path += ['../']
import torch
from torch import nn
from transformers import (
RobertaConfig,
RobertaModel,
RobertaForSequenceClassification,
RobertaTokenizer,
BertModel,
BertTokenizer,
BertConfig
)
import torch.nn.functional as F
from data.process_fn import triple_process_fn, triple2dual_process_fn
class EmbeddingMixin:
"""
Mixin for common functions in most embedding models. Each model should define its own bert-like backbone and forward.
We inherit from RobertaModel to use from_pretrained
"""
def __init__(self, model_argobj):
if model_argobj is None:
self.use_mean = False
else:
self.use_mean = model_argobj.use_mean
print("Using mean:", self.use_mean)
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding, nn.Conv1d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=0.02)
def masked_mean(self, t, mask):
s = torch.sum(t * mask.unsqueeze(-1).float(), axis=1)
d = mask.sum(axis=1, keepdim=True).float()
return s / d
def masked_mean_or_first(self, emb_all, mask):
# emb_all is a tuple from bert - sequence output, pooler
assert isinstance(emb_all, tuple)
if self.use_mean:
return self.masked_mean(emb_all[0], mask)
else:
return emb_all[0][:, 0]
def query_emb(self, input_ids, attention_mask):
raise NotImplementedError("Please Implement this method")
def body_emb(self, input_ids, attention_mask):
raise NotImplementedError("Please Implement this method")
class NLL(EmbeddingMixin):
def forward(
self,
query_ids,
attention_mask_q,
input_ids_a=None,
attention_mask_a=None,
input_ids_b=None,
attention_mask_b=None,
is_query=True,
output_dc_emb=False):
if input_ids_b is None and is_query:
return self.query_emb(query_ids, attention_mask_q)
elif input_ids_b is None:
return self.body_emb(query_ids, attention_mask_q)
q_embs = self.query_emb(query_ids, attention_mask_q)
a_embs = self.body_emb(input_ids_a, attention_mask_a)
b_embs = self.body_emb(input_ids_b, attention_mask_b)
logit_matrix = torch.cat([(q_embs * a_embs).sum(-1).unsqueeze(1),
(q_embs * b_embs).sum(-1).unsqueeze(1)], dim=1) # [B, 2]
lsm = F.log_softmax(logit_matrix, dim=1)
loss = -1.0 * lsm[:, 0]
if output_dc_emb:
return (loss.mean(), (q_embs, a_embs, b_embs))
else:
return (loss.mean(),)
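# --- Illustrative sketch (not part of the original file) ---
# The loss above is a 2-way softmax over the (q·pos, q·neg) dot products,
# so minimizing it pushes the positive score above the negative one.
# A standalone equivalent of the same computation:
def _pairwise_nll_demo():
    q, pos, neg = torch.randn(4, 768), torch.randn(4, 768), torch.randn(4, 768)
    logits = torch.stack([(q * pos).sum(-1), (q * neg).sum(-1)], dim=1)  # [B, 2]
    return (-F.log_softmax(logits, dim=1)[:, 0]).mean()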
class NLL_MultiChunk(EmbeddingMixin):
def forward(
self,
query_ids,
attention_mask_q,
input_ids_a=None,
attention_mask_a=None,
input_ids_b=None,
attention_mask_b=None,
is_query=True):
if input_ids_b is None and is_query:
return self.query_emb(query_ids, attention_mask_q)
elif input_ids_b is None:
return self.body_emb(query_ids, attention_mask_q)
q_embs = self.query_emb(query_ids, attention_mask_q)
a_embs = self.body_emb(input_ids_a, attention_mask_a)
b_embs = self.body_emb(input_ids_b, attention_mask_b)
[batchS, full_length] = input_ids_a.size()
chunk_factor = full_length // self.base_len
# special handle of attention mask -----
attention_mask_body = attention_mask_a.reshape(
batchS, chunk_factor, -1)[:, :, 0] # [batchS, chunk_factor]
inverted_bias = ((1 - attention_mask_body) * (-9999)).float()
a12 = torch.matmul(
q_embs.unsqueeze(1), a_embs.transpose(
1, 2)) # [batch, 1, chunk_factor]
logits_a = (a12[:, 0, :] + inverted_bias).max(dim=-
1, keepdim=False).values # [batch]
# -------------------------------------
# special handle of attention mask -----
attention_mask_body = attention_mask_b.reshape(
batchS, chunk_factor, -1)[:, :, 0] # [batchS, chunk_factor]
inverted_bias = ((1 - attention_mask_body) * (-9999)).float()
a12 = torch.matmul(
q_embs.unsqueeze(1), b_embs.transpose(
1, 2)) # [batch, 1, chunk_factor]
logits_b = (a12[:, 0, :] + inverted_bias).max(dim=-
1, keepdim=False).values # [batch]
# -------------------------------------
logit_matrix = torch.cat(
[logits_a.unsqueeze(1), logits_b.unsqueeze(1)], dim=1) # [B, 2]
lsm = F.log_softmax(logit_matrix, dim=1)
loss = -1.0 * lsm[:, 0]
return (loss.mean(),)
class RobertaDot_NLL_LN(NLL, RobertaForSequenceClassification):
"""None
Compress embedding to 200d, then computes NLL loss.
"""
def __init__(self, config, model_argobj=None):
NLL.__init__(self, model_argobj)
RobertaForSequenceClassification.__init__(self, config)
self.embeddingHead = nn.Linear(config.hidden_size, 768)
self.norm = nn.LayerNorm(768)
self.apply(self._init_weights)
def query_emb(self, input_ids, attention_mask):
outputs1 = self.roberta(input_ids=input_ids,
attention_mask=attention_mask)
full_emb = self.masked_mean_or_first(outputs1, attention_mask)
query1 = self.norm(self.embeddingHead(full_emb))
return query1
def body_emb(self, input_ids, attention_mask):
return self.query_emb(input_ids, attention_mask)
class RobertaDot_CLF_ANN_NLL_MultiChunk(NLL_MultiChunk, RobertaDot_NLL_LN):
def __init__(self, config):
RobertaDot_NLL_LN.__init__(self, config)
self.base_len = 512
def body_emb(self, input_ids, attention_mask):
[batchS, full_length] = input_ids.size()
chunk_factor = full_length // self.base_len
input_seq = input_ids.reshape(
batchS,
chunk_factor,
full_length //
chunk_factor).reshape(
batchS *
chunk_factor,
full_length //
chunk_factor)
attention_mask_seq = attention_mask.reshape(
batchS,
chunk_factor,
full_length //
chunk_factor).reshape(
batchS *
chunk_factor,
full_length //
chunk_factor)
outputs_k = self.roberta(input_ids=input_seq,
attention_mask=attention_mask_seq)
compressed_output_k = self.embeddingHead(
outputs_k[0]) # [batch, len, dim]
compressed_output_k = self.norm(compressed_output_k[:, 0, :])
[batch_expand, embeddingS] = compressed_output_k.size()
complex_emb_k = compressed_output_k.reshape(
batchS, chunk_factor, embeddingS)
return complex_emb_k # size [batchS, chunk_factor, embeddingS]
class HFBertEncoder(BertModel):
def __init__(self, config):
BertModel.__init__(self, config)
assert config.hidden_size > 0, 'Encoder hidden_size can\'t be zero'
self.init_weights()
@classmethod
def init_encoder(cls, args, dropout: float = 0.1):
cfg = BertConfig.from_pretrained("bert-base-uncased")
if dropout != 0:
cfg.attention_probs_dropout_prob = dropout
cfg.hidden_dropout_prob = dropout
return cls.from_pretrained("bert-base-uncased", config=cfg)
def forward(self, input_ids, attention_mask):
hidden_states = None
sequence_output, pooled_output = super().forward(input_ids=input_ids,
attention_mask=attention_mask)
pooled_output = sequence_output[:, 0, :]
return sequence_output, pooled_output, hidden_states
def get_out_size(self):
        # encode_proj is never defined on this class; guard the lookup so
        # the call falls through to the config hidden size instead of raising.
        if getattr(self, 'encode_proj', None):
return self.encode_proj.out_features
return self.config.hidden_size
class BiEncoder(nn.Module):
""" Bi-Encoder model component. Encapsulates query/question and context/passage encoders.
"""
def __init__(self, args):
super(BiEncoder, self).__init__()
self.question_model = HFBertEncoder.init_encoder(args)
self.ctx_model = HFBertEncoder.init_encoder(args)
def query_emb(self, input_ids, attention_mask):
sequence_output, pooled_output, hidden_states = self.question_model(input_ids, attention_mask)
return pooled_output
def body_emb(self, input_ids, attention_mask):
sequence_output, pooled_output, hidden_states = self.ctx_model(input_ids, attention_mask)
return pooled_output
def forward(self, query_ids, attention_mask_q, input_ids_a = None, attention_mask_a = None, input_ids_b = None, attention_mask_b = None):
if input_ids_b is None:
q_embs = self.query_emb(query_ids, attention_mask_q)
a_embs = self.body_emb(input_ids_a, attention_mask_a)
return (q_embs, a_embs)
q_embs = self.query_emb(query_ids, attention_mask_q)
a_embs = self.body_emb(input_ids_a, attention_mask_a)
b_embs = self.body_emb(input_ids_b, attention_mask_b)
logit_matrix = torch.cat([(q_embs*a_embs).sum(-1).unsqueeze(1), (q_embs*b_embs).sum(-1).unsqueeze(1)], dim=1) #[B, 2]
lsm = F.log_softmax(logit_matrix, dim=1)
loss = -1.0*lsm[:,0]
return (loss.mean(),)
# --------------------------------------------------
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (
RobertaConfig,
) if hasattr(conf,'pretrained_config_archive_map')
),
(),
)
default_process_fn = triple_process_fn
class MSMarcoConfig:
def __init__(self, name, model, process_fn=default_process_fn, use_mean=True, tokenizer_class=RobertaTokenizer, config_class=RobertaConfig):
self.name = name
self.process_fn = process_fn
self.model_class = model
self.use_mean = use_mean
self.tokenizer_class = tokenizer_class
self.config_class = config_class
configs = [
MSMarcoConfig(name="rdot_nll",
model=RobertaDot_NLL_LN,
use_mean=False,
),
MSMarcoConfig(name="rdot_nll_multi_chunk",
model=RobertaDot_CLF_ANN_NLL_MultiChunk,
use_mean=False,
),
MSMarcoConfig(name="dpr",
model=BiEncoder,
tokenizer_class=BertTokenizer,
config_class=BertConfig,
use_mean=False,
),
]
MSMarcoConfigDict = {cfg.name: cfg for cfg in configs}
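# --- Illustrative sketch (not part of the original file) ---
# Each config bundles a model class with its tokenizer/config classes:
def _msmarco_config_demo():
    cfg = MSMarcoConfigDict['rdot_nll']
    return cfg.model_class, cfg.tokenizer_class, cfg.config_class, cfg.use_mean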
| 11,058 | 35.863333 | 144 | py |
dsl | dsl-main/lib/haskell/natural4/src/L4/pdpa_read_predicates.py | import pandas as pd
import numpy as np
import re
fields = ['Predicates']
df = pd.read_csv('pdpa_predicates.csv', skipinitialspace=True, usecols=fields)
# See the keys
print(df.keys())
# See content in 'star_name'
# as a series
sentences_series = df.Predicates
print(type(sentences_series))
# as an array
sentences_array = df[["Predicates"]].to_numpy()
print(type(sentences_array))
# print(sentences_array)
# for loop to read array elements which are the lists in the list
print("There are", len(sentences_array), "predicates altogether.")
# remove unwanted characters in sentences
# rgx_list =["[", "]"]
# def clean_text(rgx_list, text):
# new_text = text
# for rgx_match in rgx_list:
# new_text = re.sub(rgx_match, '', new_text)
# return new_text
# Iterate over the array rows and print each predicate sentence
for i in sentences_array:
    for text in i:
        print(text)
        # print(len(text.split()))
print("complete")
dsl | dsl-main/lib/haskell/natural4/src/L4/treefrom.py | import spacy_udpipe
import sys
spacy_udpipe.download("en")
nlp = spacy_udpipe.load("en")
texts = sys.argv[1:]
def removePunct(ls):
return [l for l in ls if l[2] != 'punct']
def getTree(text):
    # collect the tree rooted at the sentence's root token; the accumulator
    # and the return must sit outside the loop, otherwise only the first
    # token is ever inspected (and the list is wiped on each iteration)
    trees = []
    for token in nlp(text):
        if token.dep_.lower() == 'root':
            Tree = {}
            Tree['root'] = [token.text, token.lemma_, token.dep_.lower(), token, text]
            unfiltered = [[child.text, child.lemma_, child.dep_, child, text] for child in token.children]
            Tree['children'] = removePunct(unfiltered)
            trees.append(Tree)
    return trees
# get different elements from token [text, lemma, dep, whole token]
def getElements(trees, el):
fun_elements = []
for tree in trees:
fun_elements.append(tree['root'][el])
children = tree['children']
for child in children:
            if not isinstance(child[el], int):
fun_elements.append(replaceColon(child[el]))
return(fun_elements)
def replaceColon(el):
if el.find(':') != -1:
ind = el.index(':')
cap = el[ind+1].upper()
newStr = el[:ind] + cap + el[ind + 2:]
return newStr
return el
def writeFun(trees):
fun_elements = getElements(trees, 2)
fun_name = '_'.join(fun_elements)
fun_elements = [e.replace('case', 'case_') for e in fun_elements]
fun = fun_name + " : " + ' -> '.join(fun_elements) + ' -> UDS'
return [fun, getElements(trees, 4)[0]]
# def writeCat(trees):
def getFuns():
allFuns = []
for text in texts:
text = text.rstrip()
allTrees = getTree(text)
allFuns.append(writeFun(allTrees))
return allFuns
print(getFuns())
def uniqueFuns():
outfile = []
fun_dict = {}
for f in getFuns():
if f[0] not in fun_dict:
fun_dict[f[0]] = f[1]
    for f, s in fun_dict.items():
        outfile.append([f, s])
return sorted(outfile)
def writeLabels():
# with open(abstractGrammar + '.label', 'w+') as labelFile:
labels = []
for eachFun in uniqueFuns():
print(eachFun)
eachLabel = "#fun " + eachFun[0].replace(': root ', 'head')
        labels.append(eachLabel + "\n")
return labels | 2,101 | 24.02381 | 100 | py |
dsl | dsl-main/lib/haskell/natural4/src/L4/make_GF_files.py | import spacy_udpipe
import sys
import treefrom
import time
import os
import shutil
from pathlib import Path
filename = sys.argv[-2]
print('load ', filename)
abstractGrammar = sys.argv[-1]
print('load abstract ', abstractGrammar)
# massage the ud_relations to only have the labels
def extractUDLabels(line):
words = line.partition( ": ")
label = words[0]
if label == 'case':
label = 'case_'
newLabel = treefrom.replaceColon(label)
return(newLabel)
# get categories from ud_relations
def getCats():
udLabels = []
for line in open(os.path.join(sys.path[0], "ud_relations"), "r"):
labels = extractUDLabels(line)
udLabels.append(labels)
return udLabels
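# --- Illustrative sketch (not part of the original file) ---
# Assuming ud_relations lines look like "<label>: <description>":
def _extract_labels_demo():
    assert extractUDLabels("case: case marking") == "case_"
    assert extractUDLabels("nsubj:pass: passive nominal subject") == "nsubjPass"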
def coerceFunsAbs(cat):
return [(cat + "_"), ":", "X", "->", cat, ";"]
def coerceFunsConcrete(cat):
return [(cat + "_"), "x", "= TODO ;"]
def writeLabels():
with open(abstractGrammar + '.label', 'w+') as labelFile:
for eachFun in treefrom.uniqueFuns():
eachLabel = "#fun " + eachFun[0].replace(': root ', 'head')
labelFile.write(eachLabel + "\n")
# create an abstract GF file with user entered name
def makeAbstractGF(userGrammar):
abstractGF = open (abstractGrammar + ".gf", "w+")
abstractGF.truncate(0)
abstractGF.seek(0)
abstractGF.write(
"abstract "
+ abstractGrammar
+ " = {"
+ "\n\n\tflags"
+ "\n\t\tstartcat = UDS ;"
+ "\n\n\tcat"
)
for line in getCats():
abstractGF.write("\n\t\t" + line)
abstractGF.write(" ;")
abstractGF.write(
"\n\n\t -- coercion funs"
+ "\n\n\tfun"
)
# get coercedFuns
for line in getCats():
abstractGF.write("\n\t\t" + " ".join(coerceFunsAbs(line)))
abstractGF.write( "\n\n\tfun\n" )
print('length of unique funs ', len(treefrom.uniqueFuns()))
for line in treefrom.uniqueFuns():
abstractGF.write("\t\t" + line[0] + " ;\n")
abstractGF.write("\t--" + line[1] + " ;\n\n")
abstractGF.write("}")
abstractGF.close()
def makeConcreteGF(userGrammar):
concreteGF = open (abstractGrammar+ "Eng.gf", "w+")
concreteGF.truncate(0)
concreteGF.seek(0)
concreteGF.write(
"concrete "
+ abstractGrammar
+ "Eng of "
+ abstractGrammar
+ " = {"
+ "\n\n\tlincat"
+ "\n"
)
for line in getCats():
concreteGF.write("\n\t\t"
+ line
+ " = X ;"
)
concreteGF.write(
"\n\n\tlin"
+ "\n\t\t-- the coercion funs"
)
for line in getCats():
concreteGF.write("\n\t\t" + " ".join(coerceFunsConcrete(line)))
concreteGF.write("\n\n\t\t-- the actual funs")
for line in treefrom.uniqueFuns():
function = line[0].partition( ": ")
fun = function[2]
concreteGF.write("\n\t\t-- : " + fun)
funName = function[0]
simpleFuns = fun.replace("-> ", "")
argFuns = simpleFuns.replace("UDS", "")
concreteGF.write("\n\t\t"
+ funName
+ argFuns
+ "= TODO ;"
)
concreteGF.write("\n}")
concreteGF.close()
writeLabels()
makeAbstractGF(abstractGrammar)
makeConcreteGF(abstractGrammar)
print("check")
print(getCats()) | 3,240 | 24.519685 | 67 | py |
dsl | dsl-main/lib/haskell/natural4/src/L4/sentence.py | import spacy
import spacy_udpipe
import sys
#nlp = spacy.load("en_core_web_sm")
nlp = spacy_udpipe.load("en")
from spacy import displacy
from spacy_conll import init_parser
con = init_parser(
"en", "udpipe", include_headers=True
)
def getConll(x):
conll = x._.conll_str
return conll
# join the CLI arguments into one string; the pipeline expects text, not a list
text = " ".join(sys.argv[1:])
doc_con = con(text)
conll = getConll(doc_con)
for line in conll.splitlines()[2:]:
line_list = line.split()
line_list[7] = line_list[7].lower()
if (line_list[1] == 'the'):
line_list[4] = "Quant"
line_list[5] = "FORM=0"
if (line_list[3] == 'NOUN'):
make_fun = "FUN=" + line_list[2] + "_N"
elif (line_list[3] == 'ADJ'):
make_fun = "FUN=" + line_list[2] + "_A"
elif (line_list[3] == 'DET'):
if (line_list[2] == 'the'):
make_fun = "FUN=DefArt"
else:
make_fun = "FUN=" + line_list[2] + "_Det"
elif (line_list[3] == 'VERB'):
make_fun = "FUN=" + line_list[2] + line_list[4]
elif (line_list[3] == 'PRON'):
make_fun = "FUN=" + line_list[3] + line_list[4]
elif (line_list[3] == 'CCONJ'):
make_fun = "FUN=" + line_list[2] + "_Conj"
elif (line_list[3] == 'AUX' and line_list[2] == 'be'):
make_fun = "FUN=UseComp"
else:
make_fun = "_"
line_list[-1] = make_fun
print(line_list)
#morpho(conll)
# nlp_pipe = spacy_udpipe.load("en")
# with open("spacy_udpipe.txt", "w") as f:
# for token in nlp_pipe(text):
# f.writelines([token.text, " ", token.lemma_, " ", token.pos_, " ", token.dep_, " ", token.head.text, "\n"])
| 1,515 | 24.694915 | 117 | py |
container | container-main/main.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
from pathlib import Path
from timm.data import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from losses import DistillationLoss
from samplers import RASampler
import models
import utils
def get_args_parser():
parser = argparse.ArgumentParser('DeiT training and evaluation script', add_help=False)
parser.add_argument('--batch-size', default=64, type=int)
parser.add_argument('--epochs', default=300, type=int)
# Model parameters
parser.add_argument('--model', default='deit_base_patch16_224', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input-size', default=224, type=int, help='images input size')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--model-ema', action='store_true')
parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
parser.set_defaults(model_ema=True)
parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". '
                             '(default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=True)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Distillation parameters
    parser.add_argument('--teacher-model', default='regnety_160', type=str, metavar='MODEL',
                        help='Name of teacher model to train (default: "regnety_160")')
    parser.add_argument('--teacher-path', type=str, default='',
                        help='path to the teacher model checkpoint')
    parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str,
                        help='distillation type (default: none)')
    parser.add_argument('--distillation-alpha', default=0.5, type=float,
                        help='weight of the distillation loss (default: 0.5)')
    parser.add_argument('--distillation-tau', default=1.0, type=float,
                        help='distillation temperature (default: 1.0)')
# * Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
# Dataset parameters
parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
    parser.add_argument('--data-set', default='IMNET', choices=['CIFAR', 'IMNET', 'INAT', 'INAT19'],
                        type=str, help='dataset to train on (default: IMNET)')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser
def main(args):
utils.init_distributed_mode(args)
print(args)
if args.distillation_type != 'none' and args.finetune and not args.eval:
raise NotImplementedError("Finetuning with distillation not yet supported")
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
dataset_val, _ = build_dataset(is_train=False, args=args)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
if args.repeated_aug:
sampler_train = RASampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
else:
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=int(1.5 * args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
print(f"Creating model: {args.model}")
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
drop_block_rate=None,
)
if args.finetune:
if args.finetune.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.finetune, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.finetune, map_location='cpu')
checkpoint_model = checkpoint['model']
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias', 'head_dist.weight', 'head_dist.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# interpolate position embedding
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
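        # worked example (illustrative): finetuning a 224px / patch-16 checkpoint
        # at 384px gives orig_size = 14 and new_size = 24, so the 196 position
        # tokens are resized to 576 while the class/dist tokens pass through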
model.load_state_dict(checkpoint_model, strict=False)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
args.lr = linear_scaled_lr
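    # linear scaling rule (illustrative numbers): with the default lr of 5e-4,
    # a per-GPU batch size of 64 and 16 GPUs, the effective lr becomes
    # 5e-4 * 64 * 16 / 512 = 1e-3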
optimizer = create_optimizer(args, model_without_ddp)
loss_scaler = NativeScaler()
lr_scheduler, _ = create_scheduler(args, optimizer)
criterion = LabelSmoothingCrossEntropy()
if args.mixup > 0.:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
teacher_model = None
if args.distillation_type != 'none':
assert args.teacher_path, 'need to specify teacher-path when using distillation'
print(f"Creating teacher model: {args.teacher_model}")
teacher_model = create_model(
args.teacher_model,
pretrained=False,
num_classes=args.nb_classes,
global_pool='avg',
)
if args.teacher_path.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.teacher_path, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.teacher_path, map_location='cpu')
teacher_model.load_state_dict(checkpoint['model'])
teacher_model.to(device)
teacher_model.eval()
# wrap the criterion in our custom DistillationLoss, which
# just dispatches to the original criterion if args.distillation_type is 'none'
criterion = DistillationLoss(
criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau
)
output_dir = Path(args.output_dir)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
if args.model_ema:
utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
if args.eval:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
return
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
train_stats = train_one_epoch(
model, criterion, data_loader_train,
optimizer, device, epoch, loss_scaler,
args.clip_grad, model_ema, mixup_fn,
set_training_mode=args.finetune == '' # keep in eval mode during finetuning
)
lr_scheduler.step(epoch)
if args.output_dir:
checkpoint_paths = [output_dir / 'checkpoint.pth']
for checkpoint_path in checkpoint_paths:
utils.save_on_master({
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'model_ema': get_state_dict(model_ema),
'scaler': loss_scaler.state_dict(),
'args': args,
}, checkpoint_path)
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
max_accuracy = max(max_accuracy, test_stats["acc1"])
print(f'Max accuracy: {max_accuracy:.2f}%')
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('DeiT training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
| 20,346 | 47.330166 | 119 | py |
container | container-main/losses.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Implements the knowledge distillation loss
"""
import torch
from torch.nn import functional as F
class DistillationLoss(torch.nn.Module):
"""
This module wraps a standard criterion and adds an extra knowledge distillation loss by
taking a teacher model prediction and using it as additional supervision.
"""
def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module,
distillation_type: str, alpha: float, tau: float):
super().__init__()
self.base_criterion = base_criterion
self.teacher_model = teacher_model
assert distillation_type in ['none', 'soft', 'hard']
self.distillation_type = distillation_type
self.alpha = alpha
self.tau = tau
def forward(self, inputs, outputs, labels):
"""
Args:
inputs: The original inputs that are feed to the teacher model
outputs: the outputs of the model to be trained. It is expected to be
either a Tensor, or a Tuple[Tensor, Tensor], with the original output
in the first position and the distillation predictions as the second output
labels: the labels for the base criterion
"""
outputs_kd = None
if not isinstance(outputs, torch.Tensor):
# assume that the model outputs a tuple of [outputs, outputs_kd]
outputs, outputs_kd = outputs
base_loss = self.base_criterion(outputs, labels)
if self.distillation_type == 'none':
return base_loss
if outputs_kd is None:
raise ValueError("When knowledge distillation is enabled, the model is "
"expected to return a Tuple[Tensor, Tensor] with the output of the "
"class_token and the dist_token")
        # don't backprop through the teacher
with torch.no_grad():
teacher_outputs = self.teacher_model(inputs)
if self.distillation_type == 'soft':
T = self.tau
# taken from https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100
# with slight modifications
distillation_loss = F.kl_div(
F.log_softmax(outputs_kd / T, dim=1),
F.log_softmax(teacher_outputs / T, dim=1),
reduction='sum',
log_target=True
) * (T * T) / outputs_kd.numel()
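            # the T*T factor compensates for the 1/T^2 shrinkage of soft-target
            # gradients (Hinton et al., 2015); dividing by numel() averages the
            # 'sum'-reduced KL over both the batch and class dimensions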
elif self.distillation_type == 'hard':
distillation_loss = F.cross_entropy(outputs_kd, teacher_outputs.argmax(dim=1))
loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha
return loss
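# minimal usage sketch (hypothetical variable names, not part of this file):
#   base = LabelSmoothingCrossEntropy(smoothing=0.1)
#   criterion = DistillationLoss(base, teacher_model, 'hard', alpha=0.5, tau=1.0)
#   loss = criterion(images, student_model(images), labels)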
| 2,771 | 41.646154 | 114 | py |
container | container-main/engine.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Train and eval functions used in main.py
"""
import math
import sys
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
from losses import DistillationLoss
import utils
def train_one_epoch(model: torch.nn.Module, criterion: DistillationLoss,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,
set_training_mode=True):
model.train(set_training_mode)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
with torch.cuda.amp.autocast():
outputs = model(samples)
loss = criterion(samples, outputs, targets)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
optimizer.zero_grad()
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
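        # timm's NativeScaler wraps torch.cuda.amp.GradScaler: it scales the
        # loss, runs backward(), optionally clips gradients, then steps the
        # optimizer and updates the scale factor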
loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order)
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
metric_logger.update(loss=loss_value)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
for images, target in metric_logger.log_every(data_loader, 10, header):
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
| 3,508 | 35.175258 | 98 | py |
container | container-main/hubconf.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
from models import *
dependencies = ["torch", "torchvision", "timm"]
| 138 | 22.166667 | 47 | py |
container | container-main/run_with_submitit.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
A script to run multinode training with submitit.
"""
import argparse
import os
import uuid
from pathlib import Path
import main as classification
import submitit
def parse_args():
classification_parser = classification.get_args_parser()
parser = argparse.ArgumentParser("Submitit for DeiT", parents=[classification_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=2800, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="learnfair", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Big models? Use this")
parser.add_argument('--comment', default="", type=str,
help='Comment to pass to scheduler, e.g. priority message')
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
# Init file must not exist, but it's parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main as classification
self._setup_gpu_args()
classification.main(self.args)
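    # submitit calls checkpoint() when the job is preempted or hits its time
    # limit; returning a DelayedSubmission with the updated args requeues the
    # job, which then resumes from the saved checkpoint.pth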
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name="deit")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
main()
| 4,075 | 31.094488 | 103 | py |
container | container-main/utils.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import io
import os
import time
from collections import defaultdict, deque
import datetime
import torch
import torch.distributed as dist
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
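# minimal usage sketch (assumed values, for illustration):
#   v = SmoothedValue(window_size=2)
#   v.update(1.0); v.update(3.0); v.update(5.0)
#   v.avg -> 4.0 (mean over the last 2 values), v.global_avg -> 3.0 (over all)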
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
| 7,067 | 28.573222 | 94 | py |
container | container-main/datasets.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import os
import json
from torchvision import datasets, transforms
from torchvision.datasets.folder import ImageFolder, default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import create_transform
class INatDataset(ImageFolder):
def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
category='name', loader=default_loader):
self.transform = transform
self.loader = loader
self.target_transform = target_transform
self.year = year
# assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']
path_json = os.path.join(root, f'{"train" if train else "val"}{year}.json')
with open(path_json) as json_file:
data = json.load(json_file)
with open(os.path.join(root, 'categories.json')) as json_file:
data_catg = json.load(json_file)
path_json_for_targeter = os.path.join(root, f"train{year}.json")
with open(path_json_for_targeter) as json_file:
data_for_targeter = json.load(json_file)
targeter = {}
indexer = 0
for elem in data_for_targeter['annotations']:
king = []
king.append(data_catg[int(elem['category_id'])][category])
if king[0] not in targeter.keys():
targeter[king[0]] = indexer
indexer += 1
self.nb_classes = len(targeter)
self.samples = []
for elem in data['images']:
cut = elem['file_name'].split('/')
target_current = int(cut[2])
path_current = os.path.join(root, cut[0], cut[2], cut[3])
categors = data_catg[target_current]
target_current_true = targeter[categors[category]]
self.samples.append((path_current, target_current_true))
# __getitem__ and __len__ inherited from ImageFolder
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
if args.data_set == 'CIFAR':
dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
nb_classes = 100
elif args.data_set == 'IMNET':
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif args.data_set == 'INAT':
dataset = INatDataset(args.data_path, train=is_train, year=2018,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
elif args.data_set == 'INAT19':
dataset = INatDataset(args.data_path, train=is_train, year=2019,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
return dataset, nb_classes
def build_transform(is_train, args):
resize_im = args.input_size > 32
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
)
if not resize_im:
# replace RandomResizedCropAndInterpolation with
# RandomCrop
transform.transforms[0] = transforms.RandomCrop(
args.input_size, padding=4)
return transform
t = []
if resize_im:
size = int((256 / 224) * args.input_size)
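        # e.g. input_size=224 -> resize the shorter side to 256, then
        # center-crop 224, matching the standard ImageNet eval pipeline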
t.append(
transforms.Resize(size, interpolation=3), # to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
return transforms.Compose(t)
| 4,114 | 36.409091 | 105 | py |
container | container-main/models.py | import torch
import torch.nn as nn
from collections import OrderedDict
from functools import partial
import math
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, DropPath, to_2tuple
__all__ = [
'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',
'deit_tiny_distilled_patch16_224', 'deit_small_distilled_patch16_224',
'deit_base_distilled_patch16_224', 'deit_base_patch16_384',
'deit_base_distilled_patch16_384', 'container_light'
]
class Mlp(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class CMlp(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Attention_pure(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.attn_drop = nn.Dropout(attn_drop)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
C = int(C // 3)
qkv = x.reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj_drop(x)
return x
class MixBlock(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
self.norm1 = nn.BatchNorm2d(dim)
self.conv1 = nn.Conv2d(dim, 3 * dim, 1)
self.conv2 = nn.Conv2d(dim, dim, 1)
self.conv = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
self.attn = Attention_pure(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = nn.BatchNorm2d(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.sa_weight = nn.Parameter(torch.Tensor([0.0]))
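        # learnable scalar gate: forward() mixes the self-attention branch and
        # the depthwise-conv branch with sigmoid(sa_weight); initialized at
        # 0.0, i.e. an even 0.5 / 0.5 blend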
def forward(self, x):
x = x + self.pos_embed(x)
B, _, H, W = x.shape
residual = x
x = self.norm1(x)
qkv = self.conv1(x)
conv = qkv[:, 2 * self.dim:, :, :]
conv = self.conv(conv)
sa = qkv.flatten(2).transpose(1, 2)
sa = self.attn(sa)
sa = sa.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
x = residual + self.drop_path(self.conv2(torch.sigmoid(self.sa_weight) * sa + (1 - torch.sigmoid(self.sa_weight)) * conv))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class CBlock(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
self.norm1 = nn.BatchNorm2d(dim)
self.conv1 = nn.Conv2d(dim, dim, 1)
self.conv2 = nn.Conv2d(dim, dim, 1)
self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = nn.BatchNorm2d(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.pos_embed(x)
x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x)))))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class Block(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.norm = nn.LayerNorm(embed_dim)
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x)
B, C, H, W = x.shape
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
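        # LayerNorm is applied in (B, N, C) token layout above, then the map is
        # restored to (B, C, H, W) for the convolutional blocks that follow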
return x
class HybridEmbed(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768):
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
self.img_size = img_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# FIXME this is hacky, but most reliable way of determining the exact dim of the output feature
# map for all networks, the feature metadata has reliable channel and stride info, but using
# stride to calc feature dim requires info about padding of each stage that isn't captured.
training = backbone.training
if training:
backbone.eval()
o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))
if isinstance(o, (list, tuple)):
o = o[-1] # last feature if backbone outputs list/tuple of features
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
if hasattr(self.backbone, 'feature_info'):
feature_dim = self.backbone.feature_info.channels()[-1]
else:
feature_dim = self.backbone.num_features
self.num_patches = feature_size[0] * feature_size[1]
self.proj = nn.Conv2d(feature_dim, embed_dim, 1)
def forward(self, x):
x = self.backbone(x)
if isinstance(x, (list, tuple)):
x = x[-1] # last feature if backbone outputs list/tuple of features
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class VisionTransformer(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
https://arxiv.org/abs/2010.11929
"""
def __init__(self, img_size=[224, 56, 28, 14], patch_size=[4, 2, 2, 2], in_chans=3, num_classes=1000, embed_dim=[64, 128, 320, 512], depth=[3, 4, 8, 3],
num_heads=12, mlp_ratio=[8, 8, 4, 4], qkv_bias=True, qk_scale=None, representation_size=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None):
"""
Args:
            img_size (list): input image size at each stage
            patch_size (list): patch size of each stage's patch embedding
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (list): embedding dimension of each stage
            depth (list): number of blocks in each stage
            num_heads (int): number of attention heads
            mlp_ratio (list): ratio of mlp hidden dim to embedding dim at each stage
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
hybrid_backbone (nn.Module): CNN backbone to use in-place of PatchEmbed module
norm_layer: (nn.Module): normalization layer
"""
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.embed_dim = embed_dim
self.depth = depth
if hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
else:
self.patch_embed1 = PatchEmbed(
img_size=img_size[0], patch_size=patch_size[0], in_chans=in_chans, embed_dim=embed_dim[0])
self.patch_embed2 = PatchEmbed(
img_size=img_size[1], patch_size=patch_size[1], in_chans=embed_dim[0], embed_dim=embed_dim[1])
self.patch_embed3 = PatchEmbed(
img_size=img_size[2], patch_size=patch_size[2], in_chans=embed_dim[1], embed_dim=embed_dim[2])
self.patch_embed4 = PatchEmbed(
img_size=img_size[3], patch_size=patch_size[3], in_chans=embed_dim[2], embed_dim=embed_dim[3])
num_patches1 = self.patch_embed1.num_patches
num_patches2 = self.patch_embed2.num_patches
num_patches3 = self.patch_embed3.num_patches
num_patches4 = self.patch_embed4.num_patches
self.pos_drop = nn.Dropout(p=drop_rate)
        self.mixture = True
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depth))] # stochastic depth decay rule
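        # e.g. drop_path_rate=0.1 with depth=[3, 4, 8, 3] (18 blocks) yields
        # per-block rates increasing linearly from 0.0 to 0.1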
self.blocks1 = nn.ModuleList([
CBlock(
dim=embed_dim[0], num_heads=num_heads, mlp_ratio=mlp_ratio[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth[0])])
self.blocks2 = nn.ModuleList([
CBlock(
dim=embed_dim[1], num_heads=num_heads, mlp_ratio=mlp_ratio[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+depth[0]], norm_layer=norm_layer)
for i in range(depth[1])])
self.blocks3 = nn.ModuleList([
CBlock(
dim=embed_dim[2], num_heads=num_heads, mlp_ratio=mlp_ratio[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+depth[0]+depth[1]], norm_layer=norm_layer)
for i in range(depth[2])])
self.blocks4 = nn.ModuleList([
MixBlock(
dim=embed_dim[3], num_heads=num_heads, mlp_ratio=mlp_ratio[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+depth[0]+depth[1]+depth[2]], norm_layer=norm_layer)
for i in range(depth[3])])
self.norm = nn.BatchNorm2d(embed_dim[-1])
# Representation layer
if representation_size:
self.num_features = representation_size
self.pre_logits = nn.Sequential(OrderedDict([
                ('fc', nn.Linear(embed_dim[-1], representation_size)),
('act', nn.Tanh())
]))
else:
self.pre_logits = nn.Identity()
# Classifier head
self.head = nn.Linear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed1(x)
x = self.pos_drop(x)
for blk in self.blocks1:
x = blk(x)
x = self.patch_embed2(x)
for blk in self.blocks2:
x = blk(x)
x = self.patch_embed3(x)
for blk in self.blocks3:
x = blk(x)
x = self.patch_embed4(x)
for blk in self.blocks4:
x = blk(x)
x = self.norm(x)
x = self.pre_logits(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = x.flatten(2).mean(-1)
x = self.head(x)
return x
@register_model
def container_v1_light(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=[224, 56, 28, 14], patch_size=[4, 2, 2, 2], embed_dim=[64, 128, 320, 512], depth=[3, 4, 8, 3], num_heads=16, mlp_ratio=[8, 8, 4, 4], qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
| 18,794 | 44.071942 | 164 | py |
container | container-main/samplers.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.distributed as dist
import math
class RASampler(torch.utils.data.Sampler):
"""Sampler that restricts data loading to a subset of the dataset for distributed,
with repeated augmentation.
    It ensures that each augmented version of a sample is visible to a
    different process (GPU).
Heavily based on torch.utils.data.DistributedSampler
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
# self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices = [ele for ele in indices for i in range(3)]
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices[:self.num_selected_samples])
def __len__(self):
return self.num_selected_samples
def set_epoch(self, epoch):
self.epoch = epoch
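# illustrative index pattern: with num_replicas=3 and shuffled indices
# [a, b, c, ...], the tripled stream is [a, a, a, b, b, b, c, c, c, ...];
# rank 0 takes positions 0, 3, 6, ..., rank 1 takes 1, 4, 7, ..., so each
# rank iterates the whole dataset while every repeated copy of a sample is
# augmented independently on a different GPU (the stream is then truncated
# to num_selected_samples)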
| 2,292 | 37.216667 | 103 | py |
MAgent | MAgent-master/examples/train_against.py | """
Train a model to against existing benchmark
"""
import argparse
import time
import os
import logging as log
import math
import numpy as np
import magent
from magent.builtin.rule_model import RandomActor
def generate_map(env, map_size, handles):
width = height = map_size
init_num = map_size * map_size * 0.04
gap = 3
leftID, rightID = 0, 1
# add left square of agents
n = init_num
side = int(math.sqrt(n)) * 2
pos = []
for x in range(width//2 - gap - side, width//2 - gap - side + side, 2):
for y in range((height - side)//2, (height - side)//2 + side, 2):
pos.append([x, y, 0])
env.add_agents(handles[leftID], method="custom", pos=pos)
# add right square of agents
n = init_num
side = int(math.sqrt(n)) * 2
pos = []
for x in range(width//2 + gap, width//2 + gap + side, 2):
for y in range((height - side)//2, (height - side)//2 + side, 2):
pos.append([x, y, 0])
env.add_agents(handles[rightID], method="custom", pos=pos)
def play_a_round(env, map_size, handles, models, print_every, eps, step_batch_size=None, train=True,
train_id=1, render=False):
"""play a round of game"""
env.reset()
generate_map(env, map_size, handles)
step_ct = 0
done = False
n = len(handles)
obs = [[] for _ in range(n)]
ids = [[] for _ in range(n)]
acts = [[] for _ in range(n)]
nums = [env.get_num(handle) for handle in handles]
total_reward = [0 for _ in range(n)]
n_transition = 0
pos_reward_num = 0
total_loss, value = 0, 0
print("===== sample =====")
print("eps %s number %s" % (eps, nums))
start_time = time.time()
while not done:
# take actions for every model
for i in range(n):
obs[i] = env.get_observation(handles[i])
ids[i] = env.get_agent_id(handles[i])
# let models infer action in parallel (non-blocking)
models[i].infer_action(obs[i], ids[i], 'e_greedy', eps[i], block=False)
for i in range(n):
acts[i] = models[i].fetch_action() # fetch actions (blocking)
env.set_action(handles[i], acts[i])
# simulate one step
done = env.step()
# sample
step_reward = []
for i in range(n):
rewards = env.get_reward(handles[i])
if train and i == train_id:
alives = env.get_alive(handles[train_id])
# store samples in replay buffer (non-blocking)
models[train_id].sample_step(rewards, alives, block=False)
pos_reward_num += len(rewards[rewards > 0])
s = sum(rewards)
step_reward.append(s)
total_reward[i] += s
# render
if render:
env.render()
# stat info
nums = [env.get_num(handle) for handle in handles]
n_transition += nums[train_id]
# clear dead agents
env.clear_dead()
# check return message of previous called non-blocking function sample_step()
if train:
models[train_id].check_done()
if step_ct % print_every == 0:
print("step %3d, nums: %s reward: %s, total_reward: %s, pos_rewards %d" %
(step_ct, nums, np.around(step_reward, 2), np.around(total_reward, 2),
pos_reward_num))
step_ct += 1
if step_ct > args.n_step:
break
if step_batch_size and n_transition > step_batch_size and train:
total_loss, value = models[train_id].train(500)
n_transition = 0
sample_time = time.time() - start_time
print("steps: %d, total time: %.2f, step average %.2f" % (step_ct, sample_time, sample_time / step_ct))
# train
if train:
print("===== train =====")
start_time = time.time()
total_loss, value = models[train_id].train(500)
train_time = time.time() - start_time
print("train_time %.2f" % train_time)
return magent.round(total_loss), nums, magent.round(total_reward), magent.round(value)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--save_every", type=int, default=5)
parser.add_argument("--render_every", type=int, default=10)
parser.add_argument("--n_round", type=int, default=600)
parser.add_argument("--n_step", type=int, default=550)
parser.add_argument("--render", action="store_true")
parser.add_argument("--load_from", type=int)
parser.add_argument("--train", action="store_true")
parser.add_argument("--map_size", type=int, default=125)
parser.add_argument("--greedy", action="store_true")
parser.add_argument("--name", type=str, default="against")
parser.add_argument("--eval", action="store_true")
parser.add_argument("--opponent", type=int, default=0)
parser.add_argument('--alg', default='dqn', choices=['dqn', 'drqn', 'a2c'])
args = parser.parse_args()
# download opponent model
magent.utility.check_model('against')
# set logger
magent.utility.init_logger(args.name)
# init the game
env = magent.GridWorld("battle", map_size=args.map_size)
env.set_render_dir("build/render")
# two groups of agents
handles = env.get_handles()
# sample eval observation set
if args.eval:
print("sample eval set...")
env.reset()
generate_map(env, args.map_size, handles)
eval_obs = magent.utility.sample_observation(env, handles, n_obs=2048, step=500)
else:
eval_obs = [None, None]
# init models
names = [args.name + "-a", "battle"]
batch_size = 512
unroll_step = 16
train_freq = 5
models = []
# load opponent
if args.opponent >= 0:
from magent.builtin.tf_model import DeepQNetwork
models.append(magent.ProcessingModel(env, handles[1], names[1], 20000, 0, DeepQNetwork))
models[0].load("data/battle_model", args.opponent)
else:
models.append(magent.ProcessingModel(env, handles[1], names[1], 20000, 0, RandomActor))
# load our model
if args.alg == 'dqn':
from magent.builtin.tf_model import DeepQNetwork
models.append(magent.ProcessingModel(env, handles[0], names[0], 20001, 1000, DeepQNetwork,
batch_size=batch_size,
learning_rate=3e-4,
memory_size=2 ** 20, train_freq=train_freq, eval_obs=eval_obs[0]))
step_batch_size = None
elif args.alg == 'drqn':
from magent.builtin.tf_model import DeepRecurrentQNetwork
models.append(magent.ProcessingModel(env, handles[0], names[0], 20001, 1000, DeepRecurrentQNetwork,
                                             batch_size=batch_size//unroll_step, unroll_step=unroll_step,
learning_rate=3e-4,
memory_size=4 * 625, train_freq=train_freq, eval_obs=eval_obs[0]))
step_batch_size = None
elif args.alg == 'a2c':
from magent.builtin.mx_model import AdvantageActorCritic
step_batch_size = 10 * args.map_size * args.map_size * 0.04
models.append(magent.ProcessingModel(env, handles[0], names[0], 20001, 1000, AdvantageActorCritic,
learning_rate=1e-3))
# load if
savedir = 'save_model'
if args.load_from is not None:
start_from = args.load_from
print("load ... %d" % start_from)
models[0].load(savedir, start_from)
else:
start_from = 0
# print debug info
print(args)
print("view_size", env.get_view_space(handles[0]))
print("feature_size", env.get_feature_space(handles[0]))
# play
start = time.time()
for k in range(start_from, start_from + args.n_round):
tic = time.time()
        eps_start = 1 if args.opponent != -1 else 0.1
        train_eps = magent.utility.piecewise_decay(k, [0, 100, 250], [eps_start, 0.1, 0.05]) if not args.greedy else 0
opponent_eps = train_eps if k < 100 else 0.05 # can use curriculum learning in first 100 steps
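        # piecewise_decay interpolates linearly between the breakpoints, e.g.
        # eps falls from eps_start to 0.1 over rounds 0-100, then to 0.05 by
        # round 250 and stays there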
loss, num, reward, value = play_a_round(env, args.map_size, handles, models,
eps=[opponent_eps, train_eps], step_batch_size=step_batch_size,
train=args.train,
print_every=50,
render=args.render or (k+1) % args.render_every == 0) # for e-greedy
log.info("round %d\t loss: %s\t num: %s\t reward: %s\t value: %s" % (k, loss, num, reward, value))
print("round time %.2f total time %.2f\n" % (time.time() - tic, time.time() - start))
# save models
if (k + 1) % args.save_every == 0 and args.train:
print("save model... ")
if not os.path.exists(savedir):
os.mkdir(savedir)
for model in models:
model.save(savedir, k)
# close model processing
for model in models:
model.quit()
| 9,190 | 35.328063 | 117 | py |
MAgent | MAgent-master/examples/train_arrange.py | """
Train agents to arrange themselves into a specific message
"""
import argparse
import logging as log
import time
import random
import numpy as np
import magent
from magent.builtin.tf_model import DeepQNetwork as RLModel
from magent.utility import FontProvider
def remove_wall(d, cur_pos, wall_set, unit):
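    # knock out the unit x unit wall segment between the newly visited cell
    # (cur_pos) and the cell the DFS came from; d is the direction of the
    # last move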
if d == 0:
for i in range(0, unit):
for j in range(0, unit):
temp = (cur_pos[0] + i, cur_pos[1] + unit + j)
if temp in wall_set:
wall_set.remove(temp)
elif d == 1:
for i in range(0, unit):
for j in range(0, unit):
temp = (cur_pos[0] - unit + i, cur_pos[1] + j)
if temp in wall_set:
wall_set.remove(temp)
elif d == 2:
for i in range(0, unit):
for j in range(0, unit):
temp = (cur_pos[0] + i, cur_pos[1] - unit + j)
if temp in wall_set:
wall_set.remove(temp)
elif d == 3:
for i in range(0, unit):
for j in range(0, unit):
temp = (cur_pos[0] + unit + i, cur_pos[1] + j)
if temp in wall_set:
wall_set.remove(temp)
def dfs(x, y, width, height, unit, wall_set):
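    # randomized depth-first search that carves corridors: walk in steps of
    # 2 * unit through the width x height region starting at (x, y) and
    # remove the walls between consecutive cells from wall_set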
pos = set()
trace = list()
pos.add((x, y))
trace.append((x, y))
max_x = x + width
max_y = y + height
d = random.choice(range(4))
pos_list = []
flag = 0
while len(trace) > 0:
if flag == 4:
cur_pos = trace[-1]
trace.pop()
if random.choice(range(2)) == 0:
remove_wall(d, cur_pos, wall_set, unit)
flag = 0
if len(trace) == 0:
break
cur_pos = list(trace[-1])
if d == 0:
cur_pos[1] = max(y, cur_pos[1] - 2 * unit)
elif d == 1:
cur_pos[0] = min(max_x, cur_pos[0] + 2 * unit)
elif d == 2:
cur_pos[1] = min(max_y, cur_pos[1] + 2 * unit)
elif d == 3:
cur_pos[0] = max(x, cur_pos[0] - 2 * unit)
if tuple(cur_pos) in pos:
d = (d + 1) % 4
flag += 1
else:
remove_wall(d, cur_pos, wall_set, unit)
trace.append(tuple(cur_pos))
pos.add(tuple(cur_pos))
d = random.choice(range(4))
def clean_pos_set_convert_to_list(pos_set, pos_list):
for v in pos_list:
if v in pos_set:
pos_set.remove(v)
return list(pos_set)
def draw_line(x, y, width, height):
pos_set = []
for r in range(height):
for c in range(width):
pos_set.append((x + c, y + r))
return pos_set
def open_the_door(x_s, y_s, w, h, unit):
pos_list = []
n_door = 15
random_horizon_list_x = [x_s + (2 * np.random.choice(w // 2 // unit, n_door) + 1) * unit, x_s + (2 * np.random.choice(w // 2 // unit, n_door) - 1) * unit]
random_vertical_list_y = [y_s + (2 * np.random.choice(h // 2 // unit, n_door) + 1) * unit, y_s + (2 * np.random.choice(h // 2 // unit, n_door) + 1) * unit]
y_e = y_s + h - unit
for v in random_horizon_list_x[0]:
pos_list.extend([(v, y_s), (v + 1, y_s), (v, y_s + 1), (v + 1, y_s + 1)])
for v in random_horizon_list_x[1]:
pos_list.extend([(v, y_e), (v + 1, y_e), (v, y_e + 1), (v + 1, y_e + 1)])
x_e = x_s + w - unit
for v in random_vertical_list_y[0]:
pos_list.extend([(x_s, v), (x_s, v + 1), (x_s + 1, v), (x_s + 1, v + 1)])
for v in random_vertical_list_y[1]:
pos_list.extend([(x_e, v), (x_e, v + 1), (x_e + 1, v), (x_e + 1, v + 1)])
return pos_list
def create_maze(pos, width, height, unit, font_area):
# draw block: with rect: left(x), top(y), width, height
pos_set = []
for i in range(height):
if i % 2 == 0:
pos_set.extend(draw_line(pos[0], pos[1] + i * unit, width * unit, unit))
pos_set.extend(draw_line(pos[0], pos[1] + font_area[1] + i * unit, width * unit, unit))
pos_set.extend(draw_line(pos[0] + i * unit, pos[1] + height * unit, unit, font_area[1]))
pos_set.extend(draw_line(pos[0] + font_area[0] + i * unit, pos[1] + height * unit, unit, font_area[1]))
for i in range(width):
if i % 2 == 0:
pos_set.extend(draw_line(pos[0] + i * unit, pos[1], unit, height * unit))
pos_set.extend(draw_line(pos[0] + i * unit, pos[1] + font_area[1], unit, height * unit))
pos_set.extend(draw_line(pos[0], pos[1] + i * unit, height * unit, unit))
pos_set.extend(draw_line(pos[0] + font_area[0], pos[1] + i * unit, height * unit, unit))
pos_set = set(pos_set)
dfs(pos[0] + 2, pos[1] + 2, (width - 1) * unit, (height - 1) * unit, unit, pos_set) # north
dfs(pos[0] + 2, pos[1] + (height - 2) * unit, (height - 1) * unit, (width + 3) * unit, unit, pos_set) # west
dfs(pos[0] + height * unit, pos[1] + font_area[1] - unit, (width - height) * unit, (height - 1) * unit, unit, pos_set) # south
dfs(pos[0] + font_area[0] - unit, pos[1] + (height - 2) * unit, (height - 1) * unit, font_area[1] - (height + 1) * unit, unit, pos_set) # east
temp = []
temp.extend(open_the_door(pos[0], pos[1], font_area[0] + height * unit, font_area[1] + height * unit, unit))
res = clean_pos_set_convert_to_list(pos_set, temp)
return res
def draw_split_line(x, y, width, height, split=10):
pos_set = []
if height > width:
splits = set(np.random.choice(height // 2, split) * 2)
for r in range(height):
if r in splits or (r - 1 in splits):
continue
for c in range(width):
pos_set.append((x + c, y + r))
else:
splits = set(np.random.choice(width // 2, split) * 2)
for r in range(height):
for c in range(width):
if c in splits or (c - 1 in splits):
continue
pos_set.append((x + c, y + r))
return pos_set
def create_naive_maze(pos, width, height, unit, font_area):
pos_set = []
for i in range(height):
if i % 2 == 0:
pos_set.extend(draw_split_line(pos[0], pos[1] + i * unit, width * unit, unit))
pos_set.extend(draw_split_line(pos[0], pos[1] + font_area[1] + i * unit, width * unit, unit))
pos_set.extend(draw_split_line(pos[0] + i * unit, pos[1] + height * unit, unit, font_area[1] - height * unit))
pos_set.extend(draw_split_line(pos[0] + font_area[0] + i * unit, pos[1] + height * unit, unit, font_area[1] - height * unit))
return pos_set
def load_config(map_size):
gw = magent.gridworld
cfg = gw.Config()
cfg.set({"map_width": map_size, "map_height": map_size})
cfg.set({"minimap_mode": True})
cfg.set({"embedding_size": 12})
goal = cfg.register_agent_type(
"goal",
{'width': 1, 'length': 1,
'can_absorb': True
}
)
agent = cfg.register_agent_type(
"agent",
{'width': 1, 'length': 1, 'hp': 10, 'speed': 2,
'view_range': gw.CircleRange(6),
'step_recover': -10.0/400,
'step_reward': 0,
})
g_goal = cfg.add_group(goal)
g_agent = cfg.add_group(agent)
g = gw.AgentSymbol(g_goal, 'any')
a = gw.AgentSymbol(g_agent, 'any')
cfg.add_reward_rule(gw.Event(a, 'collide', g), receiver=a, value=10)
return cfg
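# Illustrative sketch (unused): generate_map below assumes FontProvider exposes
# integer width/height attributes and a get(char_code) method returning a 2-D
# 0/1 glyph bitmap. A hypothetical stand-in with the same interface:
class FontProviderSketch:
    width = height = 8  # matches the 8x8 font shipped in data/font_8x8
    def get(self, char_code):
        # a solid block in place of a real glyph bitmap
        return [[1] * self.height for _ in range(self.width)]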
def generate_map(env, map_size, goal_handle, handles):
# random message
font = FontProvider('data/font_8x8/basic.txt')
n_msg = random.randint(1, 4)
messages = []
for i in range(n_msg):
length = random.randint(2, 9)
tmp = []
for j in range(length):
tmp.append(random.randint(0x20, 0x7E))
messages.append(tmp)
center_x, center_y = map_size // 2, map_size // 2
# create maze: left pos, width, height
radius = 90
pos_list = create_maze([center_x - radius, center_y - radius], radius + 1, 15, 2, font_area=[radius * 2 - 28, radius * 2 - 28])
env.add_walls(method="custom", pos=pos_list)
def add_square(pos, side, gap):
side = int(side)
for x in range(center_x - side//2, center_x + side//2 + 1, gap):
pos.append([x, center_y - side//2])
pos.append([x, center_y + side//2])
for y in range(center_y - side//2, center_y + side//2 + 1, gap):
pos.append([center_x - side//2, y])
pos.append([center_x + side//2, y])
# goal
pos = []
add_square(pos, map_size * 0.75, 10)
add_square(pos, map_size * 0.61, 10)
add_square(pos, map_size * 0.47, 12)
add_square(pos, map_size * 0.39, 12)
env.add_agents(goal_handle, method="custom", pos=pos)
circle_goal_num = env.get_num(goal_handle)
def draw(base_x, base_y, scale, data):
w, h = len(data), len(data[0])
pos = []
for i in range(w):
for j in range(h):
if data[i][j] == 1:
start_x = i * scale + base_y
start_y = j * scale + base_x
for x in range(start_x, start_x + scale):
for y in range(start_y, start_y + scale):
pos.append([y, x])
env.add_agents(goal_handle, method="custom", pos=pos)
    base_y = (map_size - len(messages) * font.height) // 2  # integer division: draw() feeds these into range()
for message in messages:
        base_x = (map_size - len(message) * font.width) // 2
scale = 1
for x in message:
data = font.get(x)
draw(base_x, base_y, scale, data)
base_x += font.width
base_y += font.height + 1
alpha_goal_num = env.get_num(goal_handle) - circle_goal_num
# agent
pos = []
add_square(pos, map_size * 0.95, 1)
add_square(pos, map_size * 0.9, 1)
add_square(pos, map_size * 0.85, 1)
add_square(pos, map_size * 0.80, 1)
pos = np.array(pos)
pos = pos[np.random.choice(np.arange(len(pos)), int(circle_goal_num + alpha_goal_num * 1.25), replace=False)]
env.add_agents(handles[0], method="custom", pos=pos)
def play_a_round(env, map_size, food_handle, handles, models, train_id=-1,
print_every=10, record=False, render=False, eps=None):
env.reset()
generate_map(env, map_size, food_handle, handles)
step_ct = 0
total_reward = 0
done = False
pos_reward_ct = set()
n = len(handles)
obs = [None for _ in range(n)]
ids = [None for _ in range(n)]
acts = [None for _ in range(n)]
nums = [env.get_num(handle) for handle in handles]
sample_buffer = magent.utility.EpisodesBuffer(capacity=5000)
center_x, center_y = map_size // 2, map_size // 2
print("===== sample =====")
print("eps %s number %s" % (eps, nums))
start_time = time.time()
new_rule_ct = 0
while not done:
# take actions for every model
for i in range(n):
obs[i] = env.get_observation(handles[i])
ids[i] = env.get_agent_id(handles[i])
# rule modification signal
if new_rule_ct > 0:
obs[i][1][:, 10:12] = 0
else:
obs[i][1][:, 10:12] = 1
acts[i] = models[i].infer_action(obs[i], ids[i], policy='e_greedy', eps=eps)
env.set_action(handles[i], acts[i])
# simulate one step
done = env.step()
# get num of goal
goal_num = env.get_num(food_handle)
# get rewards
rewards = env.get_reward(handles[train_id])
for id, r in zip(ids[train_id], rewards):
if r > 0.05 and id not in pos_reward_ct:
pos_reward_ct.add(id)
# let away (modify reward)
if 1.0 * len(pos_reward_ct) / goal_num >= 0.99:
new_rule_ct += 1
pos = env.get_pos(handles[train_id])
normalizer = center_x ** 2 + center_y ** 2
for i in range(len(pos)):
x, y = pos[i]
rate = 1 - 1.0 * ((x - center_x) ** 2 + (y - center_y) ** 2) / normalizer
delta = -rate * 4.5 - 0.5
rewards[i] += delta
# sample: with new reward
step_reward = 0
if train_id != -1:
alives = env.get_alive(handles[train_id])
total_reward += sum(rewards)
sample_buffer.record_step(ids[train_id], obs[train_id], acts[train_id], rewards, alives)
step_reward = sum(rewards)
# render
if render:
env.render()
# clear dead agents
env.clear_dead()
# stats info
for i in range(n):
nums[i] = env.get_num(handles[i])
if step_ct % print_every == 0:
print("step %3d, train %d, num %s, reward %.2f, total_reward: %.2f, non_zero: %d" %
(step_ct, train_id, [goal_num] + nums, step_reward, total_reward, len(pos_reward_ct)))
step_ct += 1
sample_time = time.time() - start_time
print("steps: %d, new_rule: %d, total time: %.2f, step average %.2f" % (step_ct, new_rule_ct, sample_time, sample_time / step_ct))
if record:
with open("reward-hunger.txt", "a") as fout:
fout.write(str(nums[0]) + "\n")
# train
total_loss = value = 0
if train_id != -1:
print("===== train =====")
start_time = time.time()
total_loss, value = models[train_id].train(sample_buffer, print_every=250)
train_time = time.time() - start_time
print("train_time %.2f" % train_time)
return total_loss, total_reward, value, len(pos_reward_ct), 1.0 * len(pos_reward_ct) / goal_num
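# Illustrative sketch (unused): the radial "let away" penalty applied inside
# play_a_round above, extracted for clarity. The function name is hypothetical;
# the in-loop code remains the authoritative version.
def radial_penalty(x, y, center_x, center_y):
    # rate is 1 at the map center and falls to 0 at the corners
    rate = 1 - 1.0 * ((x - center_x) ** 2 + (y - center_y) ** 2) / (center_x ** 2 + center_y ** 2)
    # so the penalty ranges from -5.0 (center) to -0.5 (corners)
    return -rate * 4.5 - 0.5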
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--save_every", type=int, default=2)
parser.add_argument("--render_every", type=int, default=2)
parser.add_argument("--n_round", type=int, default=2000)
parser.add_argument("--render", action='store_true')
parser.add_argument("--load_from", type=int)
parser.add_argument("--train", action="store_true")
parser.add_argument("--print_every", type=int, default=100)
parser.add_argument("--map_size", type=int, default=250)
parser.add_argument("--greedy", action="store_true")
parser.add_argument("--name", type=str, default="arrange")
parser.add_argument("--record", action="store_true")
parser.add_argument("--eval", action="store_true")
args = parser.parse_args()
# set logger
log.basicConfig(level=log.INFO, filename=args.name + '.log')
console = log.StreamHandler()
console.setLevel(log.INFO)
log.getLogger('').addHandler(console)
# init env
env = magent.GridWorld(load_config(map_size=args.map_size))
env.set_render_dir("build/render")
handles = env.get_handles()
food_handle = handles[0]
player_handles = handles[1:]
# sample eval observation set
eval_obs = None
if args.eval:
print("sample eval set...")
env.reset()
generate_map(env, args.map_size, food_handle, player_handles)
        eval_obs = magent.utility.sample_observation(env, player_handles, 2048, 500)[0]
# load models
models = [
RLModel(env, player_handles[0], args.name,
batch_size=512, memory_size=2 ** 20, target_update=1000,
train_freq=4, eval_obs=eval_obs)
]
# load saved model
save_dir = "save_model"
if args.load_from is not None:
start_from = args.load_from
print("load models...")
for model in models:
model.load(save_dir, start_from)
else:
start_from = 0
# print debug info
print(args)
print('view_space', env.get_view_space(player_handles[0]))
print('feature_space', env.get_feature_space(player_handles[0]))
print('view2attack', env.get_view2attack(player_handles[0]))
if args.record:
for k in range(4, 999 + 5, 5):
eps = 0
for model in models:
model.load(save_dir, start_from)
play_a_round(env, args.map_size, food_handle, player_handles, models,
-1, record=True, render=False,
print_every=args.print_every, eps=eps)
else:
# play
start = time.time()
train_id = 0 if args.train else -1
for k in range(start_from, start_from + args.n_round):
tic = time.time()
eps = magent.utility.piecewise_decay(k, [0, 400, 1200], [1.0, 0.2, 0.10]) if not args.greedy else 0
loss, reward, value, pos_reward_ct, fill_rate = \
play_a_round(env, args.map_size, food_handle, player_handles, models,
train_id, record=False,
render=args.render or (k+1) % args.render_every == 0,
print_every=args.print_every, eps=eps)
log.info("round %d\t loss: %.3f\t reward: %.2f\t value: %.3f\t pos_reward_ct: %d\t fill: %.2f"
% (k, loss, reward, value, pos_reward_ct, fill_rate))
print("round time %.2f total time %.2f\n" % (time.time() - tic, time.time() - start))
if (k + 1) % args.save_every == 0 and args.train:
print("save models...")
for model in models:
model.save(save_dir, k)
| 17,415 | 34.398374 | 159 | py |
MAgent | MAgent-master/examples/train_multi.py | """
A battle game with four groups of agents: two armies, each made of a melee group and a ranged group
"""
import argparse
import time
import logging as log
import math
import numpy as np
import magent
def load_config(map_size):
gw = magent.gridworld
cfg = gw.Config()
cfg.set({"map_width": map_size, "map_height": map_size})
cfg.set({"minimap_mode": True})
cfg.set({"embedding_size": 10})
melee = cfg.register_agent_type(
"melee",
{'width': 1, 'length': 1, 'hp': 10, 'speed': 1,
'view_range': gw.CircleRange(6), 'attack_range': gw.CircleRange(1),
'damage': 2, 'step_recover': 0.1, 'attack_in_group': True,
'step_reward': -0.01, 'kill_reward': 0, 'dead_penalty': -0.1, 'attack_penalty': -1,
})
ranged = cfg.register_agent_type(
"ranged",
{'width': 1, 'length': 1, 'hp': 3, 'speed': 2,
'view_range': gw.CircleRange(6), 'attack_range': gw.CircleRange(2),
'damage': 2, 'step_recover': 0.1, 'attack_in_group': True,
'step_reward': -0.01, 'kill_reward': 0, 'dead_penalty': -0.1, 'attack_penalty': -1,
})
g0 = cfg.add_group(melee)
g1 = cfg.add_group(ranged)
g2 = cfg.add_group(melee)
g3 = cfg.add_group(ranged)
arm0_0 = gw.AgentSymbol(g0, index='any')
arm0_1 = gw.AgentSymbol(g1, index='any')
arm1_0 = gw.AgentSymbol(g2, index='any')
arm1_1 = gw.AgentSymbol(g3, index='any')
# reward shaping
cfg.add_reward_rule(gw.Event(arm0_0, 'attack', arm1_0), receiver=arm0_0, value=2)
cfg.add_reward_rule(gw.Event(arm0_0, 'attack', arm1_1), receiver=arm0_0, value=2)
cfg.add_reward_rule(gw.Event(arm0_1, 'attack', arm1_0), receiver=arm0_1, value=2)
cfg.add_reward_rule(gw.Event(arm0_1, 'attack', arm1_1), receiver=arm0_1, value=2)
cfg.add_reward_rule(gw.Event(arm1_0, 'attack', arm0_0), receiver=arm1_0, value=2)
cfg.add_reward_rule(gw.Event(arm1_0, 'attack', arm0_1), receiver=arm1_0, value=2)
cfg.add_reward_rule(gw.Event(arm1_1, 'attack', arm0_0), receiver=arm1_1, value=2)
cfg.add_reward_rule(gw.Event(arm1_1, 'attack', arm0_1), receiver=arm1_1, value=2)
# kill reward
cfg.add_reward_rule(gw.Event(arm0_0, 'kill', arm1_0), receiver=arm0_0, value=100)
cfg.add_reward_rule(gw.Event(arm0_0, 'kill', arm1_1), receiver=arm0_0, value=100)
cfg.add_reward_rule(gw.Event(arm0_1, 'kill', arm1_0), receiver=arm0_1, value=100)
cfg.add_reward_rule(gw.Event(arm0_1, 'kill', arm1_1), receiver=arm0_1, value=100)
cfg.add_reward_rule(gw.Event(arm1_0, 'kill', arm0_0), receiver=arm1_0, value=100)
cfg.add_reward_rule(gw.Event(arm1_0, 'kill', arm0_1), receiver=arm1_0, value=100)
cfg.add_reward_rule(gw.Event(arm1_1, 'kill', arm0_0), receiver=arm1_1, value=100)
cfg.add_reward_rule(gw.Event(arm1_1, 'kill', arm0_1), receiver=arm1_1, value=100)
return cfg
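# Illustrative sketch (unused): the 16 explicit rules above reward every army-0
# symbol against every army-1 symbol and vice versa. A hypothetical helper that
# expresses the same pattern; the explicit rules stay authoritative.
def add_mutual_rewards(cfg, gw, attackers, targets, attack_value=2, kill_value=100):
    for a in attackers:
        for t in targets:
            cfg.add_reward_rule(gw.Event(a, 'attack', t), receiver=a, value=attack_value)
            cfg.add_reward_rule(gw.Event(a, 'kill', t), receiver=a, value=kill_value)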
def generate_map(env, map_size, handles):
width = map_size
height = map_size
init_num = map_size * map_size * 0.04
gap = 3
# left
n = init_num
side = int(math.sqrt(n)) * 2
pos = [[], []]
ct = 0
for x in range(width//2 - gap - side, width//2 - gap - side + side, 2):
for y in range((height - side)//2, (height - side)//2 + side, 2):
pos[ct % 2].append([x, y])
ct += 1
env.add_agents(handles[0], method="custom", pos=pos[0])
env.add_agents(handles[1], method="custom", pos=pos[1])
# right
n = init_num
side = int(math.sqrt(n)) * 2
pos = [[], []]
ct = 0
for x in range(width//2 + gap, width//2 + gap + side, 2):
for y in range((height - side)//2, (height - side)//2 + side, 2):
pos[ct % 2].append([x, y])
ct += 1
env.add_agents(handles[2], method="custom", pos=pos[0])
env.add_agents(handles[3], method="custom", pos=pos[1])
def play_a_round(env, map_size, handles, models, print_every, train=True, render=False, eps=None):
env.reset()
generate_map(env, map_size, handles)
step_ct = 0
done = False
n = len(handles)
obs = [[] for _ in range(n)]
ids = [[] for _ in range(n)]
acts = [[] for _ in range(n)]
nums = [env.get_num(handle) for handle in handles]
total_reward = [0 for _ in range(n)]
print("===== sample =====")
print("eps %.2f number %s" % (eps, nums))
start_time = time.time()
while not done:
# take actions for every model
for i in range(n):
obs[i] = env.get_observation(handles[i])
ids[i] = env.get_agent_id(handles[i])
# let models infer action in parallel (non-blocking)
models[i].infer_action(obs[i], ids[i], 'e_greedy', eps, block=False)
for i in range(n):
acts[i] = models[i].fetch_action() # fetch actions (blocking)
env.set_action(handles[i], acts[i])
# simulate one step
done = env.step()
# sample
step_reward = []
for i in range(n):
rewards = env.get_reward(handles[i])
if train:
alives = env.get_alive(handles[i])
# store samples in replay buffer (non-blocking)
models[i].sample_step(rewards, alives, block=False)
s = sum(rewards)
step_reward.append(s)
total_reward[i] += s
# render
if render:
env.render()
# stat info
nums = [env.get_num(handle) for handle in handles]
# clear dead agents
env.clear_dead()
        # check the status of the previously issued non-blocking sample_step() calls
if args.train:
for model in models:
model.check_done()
if step_ct % print_every == 0:
print("step %3d, nums: %s reward: %s, total_reward: %s " %
(step_ct, nums, np.around(step_reward, 2), np.around(total_reward, 2)))
step_ct += 1
if step_ct > 550:
break
sample_time = time.time() - start_time
print("steps: %d, total time: %.2f, step average %.2f" % (step_ct, sample_time, sample_time / step_ct))
# train
total_loss, value = [0 for _ in range(n)], [0 for _ in range(n)]
if train:
print("===== train =====")
start_time = time.time()
# train models in parallel
for i in range(n):
models[i].train(print_every=500, block=False)
for i in range(n):
total_loss[i], value[i] = models[i].fetch_train()
train_time = time.time() - start_time
print("train_time %.2f" % train_time)
def round_list(l): return [round(x, 2) for x in l]
return round_list(total_loss), nums, round_list(total_reward), round_list(value)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--save_every", type=int, default=5)
parser.add_argument("--render_every", type=int, default=10)
parser.add_argument("--n_round", type=int, default=2000)
parser.add_argument("--render", action="store_true")
parser.add_argument("--load_from", type=int)
parser.add_argument("--train", action="store_true")
parser.add_argument("--map_size", type=int, default=125)
parser.add_argument("--greedy", action="store_true")
parser.add_argument("--name", type=str, default="battle")
parser.add_argument("--eval", action="store_true")
parser.add_argument('--alg', default='dqn', choices=['dqn', 'drqn', 'a2c'])
args = parser.parse_args()
# set logger
magent.utility.init_logger(args.name)
# init the game
env = magent.GridWorld(load_config(args.map_size))
env.set_render_dir("build/render")
# two groups of agents
handles = env.get_handles()
# sample eval observation set
eval_obs = [None for _ in range(len(handles))]
if args.eval:
print("sample eval set...")
env.reset()
generate_map(env, args.map_size, handles)
eval_obs = magent.utility.sample_observation(env, handles, 2048, 500)
# load models
batch_size = 256
unroll_step = 8
target_update = 1000
train_freq = 5
if args.alg == 'dqn':
from magent.builtin.tf_model import DeepQNetwork
RLModel = DeepQNetwork
base_args = {'batch_size': batch_size,
'memory_size': 2 ** 20,
'target_update': target_update, 'train_freq': train_freq}
elif args.alg == 'drqn':
from magent.builtin.tf_model import DeepRecurrentQNetwork
RLModel = DeepRecurrentQNetwork
        base_args = {'batch_size': batch_size // unroll_step, 'unroll_step': unroll_step,
'memory_size': 8 * 300,
'target_update': target_update, 'train_freq': train_freq}
elif args.alg == 'a2c':
raise NotImplementedError
else:
raise NotImplementedError
# load models
names = [args.name + "-l0", args.name + "-l1", args.name + "-r0", args.name + "-r1"]
models = []
for i in range(len(names)):
model_args = {'eval_obs': eval_obs[i]}
model_args.update(base_args)
models.append(magent.ProcessingModel(env, handles[i], names[i], 20000+i, 1000, RLModel, **model_args))
    # load checkpoint if specified
savedir = 'save_model'
if args.load_from is not None:
start_from = args.load_from
print("load ... %d" % start_from)
for model in models:
model.load(savedir, start_from)
else:
start_from = 0
# print state info
print(args)
print("view_size", env.get_view_space(handles[0]))
print("feature_size", env.get_feature_space(handles[0]))
# play
start = time.time()
for k in range(start_from, start_from + args.n_round):
tic = time.time()
eps = magent.utility.piecewise_decay(k, [0, 700, 1400], [1, 0.2, 0.05]) if not args.greedy else 0
loss, num, reward, value = play_a_round(env, args.map_size, handles, models,
train=args.train, print_every=50,
render=args.render or (k+1) % args.render_every == 0,
eps=eps) # for e-greedy
log.info("round %d\t loss: %s\t num: %s\t reward: %s\t value: %s" % (k, loss, num, reward, value))
print("round time %.2f total time %.2f\n" % (time.time() - tic, time.time() - start))
# save models
if (k + 1) % args.save_every == 0 and args.train:
print("save model... ")
for model in models:
model.save(savedir, k)
# send quit command
for model in models:
model.quit()
| 10,600 | 34.454849 | 110 | py |
MAgent | MAgent-master/examples/train_battle_game.py | """
Train script of the battle game
"""
import argparse
import time
import logging as log
import math
import numpy as np
import magent
from magent.builtin.tf_model import DeepQNetwork, DeepRecurrentQNetwork
def generate_map(env, map_size, handles):
width = map_size
height = map_size
init_num = 20
gap = 3
leftID, rightID = 0, 1
# left
pos = []
    for y in range(10, 45):
        pos.append((width // 2 - 5, y))
        pos.append((width // 2 - 4, y))
    for y in range(50, height // 2 + 25):
        pos.append((width // 2 - 5, y))
        pos.append((width // 2 - 4, y))
    for y in range(height // 2 - 25, height - 50):
        pos.append((width // 2 + 5, y))
        pos.append((width // 2 + 4, y))
    for y in range(height - 45, height - 10):
        pos.append((width // 2 + 5, y))
        pos.append((width // 2 + 4, y))
env.add_walls(pos=pos, method="custom")
n = init_num
side = int(math.sqrt(n)) * 2
pos = []
for x in range(width//2 - gap - side, width//2 - gap - side + side, 2):
for y in range((height - side)//2, (height - side)//2 + side, 2):
pos.append([x, y, 0])
env.add_agents(handles[leftID], method="custom", pos=pos)
# right
n = init_num
side = int(math.sqrt(n)) * 2
pos = []
for x in range(width//2 + gap, width//2 + gap + side, 2):
for y in range((height - side)//2, (height - side)//2 + side, 2):
pos.append([x, y, 0])
env.add_agents(handles[rightID], method="custom", pos=pos)
def play_a_round(env, map_size, handles, models, print_every, train=True, render=False, eps=None):
env.reset()
generate_map(env, map_size, handles)
step_ct = 0
done = False
n = len(handles)
obs = [[] for _ in range(n)]
ids = [[] for _ in range(n)]
acts = [[] for _ in range(n)]
nums = [env.get_num(handle) for handle in handles]
total_reward = [0 for _ in range(n)]
print("===== sample =====")
print("eps %.2f number %s" % (eps, nums))
start_time = time.time()
    counter = 10  # number of random reinforcement waves still to spawn
while not done:
# take actions for every model
for i in range(n):
obs[i] = env.get_observation(handles[i])
ids[i] = env.get_agent_id(handles[i])
# let models infer action in parallel (non-blocking)
models[i].infer_action(obs[i], ids[i], 'e_greedy', eps, block=False)
for i in range(n):
acts[i] = models[i].fetch_action() # fetch actions (blocking)
env.set_action(handles[i], acts[i])
# simulate one step
done = env.step()
# sample
step_reward = []
for i in range(n):
rewards = env.get_reward(handles[i])
pos = env.get_pos(handles[i])
for (x, y) in pos:
rewards -= ((1.0 * x / map_size - 0.5) ** 2 + (1.0 * y / map_size - 0.5) ** 2) / 100
if train:
alives = env.get_alive(handles[i])
# store samples in replay buffer (non-blocking)
models[i].sample_step(rewards, alives, block=False)
s = sum(rewards)
step_reward.append(s)
total_reward[i] += s
# render
if render:
env.render()
# stat info
nums = [env.get_num(handle) for handle in handles]
# clear dead agents
env.clear_dead()
        # check the status of the previously issued non-blocking sample_step() calls
if args.train:
for model in models:
model.check_done()
if step_ct % print_every == 0:
print("step %3d, nums: %s reward: %s, total_reward: %s " %
(step_ct, nums, np.around(step_reward, 2), np.around(total_reward, 2)))
step_ct += 1
if step_ct % 50 == 0 and counter >= 0:
counter -= 1
g = 1
pos = []
x = np.random.randint(0, map_size - 1)
y = np.random.randint(0, map_size - 1)
for i in range(-4, 4):
for j in range(-4, 4):
pos.append((x + i, y + j))
env.add_agents(handles[g ^ 1], method="custom", pos=pos)
pos = []
x = np.random.randint(0, map_size - 1)
y = np.random.randint(0, map_size - 1)
for i in range(-4, 4):
for j in range(-4, 4):
pos.append((x + i, y + j))
env.add_agents(handles[g], method="custom", pos=pos)
            step_ct = 0  # restart the step budget so the episode continues after reinforcements
if step_ct > 500:
break
sample_time = time.time() - start_time
print("steps: %d, total time: %.2f, step average %.2f" % (step_ct, sample_time, sample_time / step_ct))
# train
total_loss, value = [0 for _ in range(n)], [0 for _ in range(n)]
if train:
print("===== train =====")
start_time = time.time()
# train models in parallel
for i in range(n):
models[i].train(print_every=1000, block=False)
for i in range(n):
total_loss[i], value[i] = models[i].fetch_train()
train_time = time.time() - start_time
print("train_time %.2f" % train_time)
def round_list(l): return [round(x, 2) for x in l]
return round_list(total_loss), nums, round_list(total_reward), round_list(value)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--save_every", type=int, default=5)
parser.add_argument("--render_every", type=int, default=10)
parser.add_argument("--n_round", type=int, default=1500)
parser.add_argument("--render", action="store_true")
parser.add_argument("--load_from", type=int)
parser.add_argument("--train", action="store_true")
parser.add_argument("--map_size", type=int, default=125)
parser.add_argument("--greedy", action="store_true")
parser.add_argument("--name", type=str, default="battle")
parser.add_argument("--eval", action="store_true")
parser.add_argument('--alg', default='dqn', choices=['dqn', 'drqn', 'a2c'])
args = parser.parse_args()
# set logger
magent.utility.init_logger(args.name)
# init the game
env = magent.GridWorld("battle", map_size=args.map_size)
env.set_render_dir("build/render")
# two groups of agents
handles = env.get_handles()
# sample eval observation set
eval_obs = [None, None]
if args.eval:
print("sample eval set...")
env.reset()
generate_map(env, args.map_size, handles)
for i in range(len(handles)):
            eval_obs[i] = magent.utility.sample_observation(env, handles, 2048, 500)[i]
# load models
batch_size = 256
unroll_step = 8
target_update = 1200
train_freq = 5
if args.alg == 'dqn':
RLModel = DeepQNetwork
base_args = {'batch_size': batch_size,
'memory_size': 2 ** 21, 'learning_rate': 1e-4,
'target_update': target_update, 'train_freq': train_freq}
elif args.alg == 'drqn':
RLModel = DeepRecurrentQNetwork
        base_args = {'batch_size': batch_size // unroll_step, 'unroll_step': unroll_step,
'memory_size': 8 * 625, 'learning_rate': 1e-4,
'target_update': target_update, 'train_freq': train_freq}
elif args.alg == 'a2c':
raise NotImplementedError
else:
raise NotImplementedError
# init models
names = [args.name + "-l", args.name + "-r"]
models = []
for i in range(len(names)):
model_args = {'eval_obs': eval_obs[i]}
model_args.update(base_args)
models.append(magent.ProcessingModel(env, handles[i], names[i], 20000, 1000, RLModel, **model_args))
    # load checkpoint if specified
savedir = 'save_model'
if args.load_from is not None:
start_from = args.load_from
print("load ... %d" % start_from)
for model in models:
model.load(savedir, start_from)
else:
start_from = 0
# print state info
print(args)
print("view_space", env.get_view_space(handles[0]))
print("feature_space", env.get_feature_space(handles[0]))
# play
start = time.time()
for k in range(start_from, start_from + args.n_round):
tic = time.time()
eps = magent.utility.piecewise_decay(k, [0, 600, 1200], [1, 0.2, 0.1]) if not args.greedy else 0
loss, num, reward, value = play_a_round(env, args.map_size, handles, models,
train=args.train, print_every=50,
render=args.render or (k+1) % args.render_every == 0,
eps=eps) # for e-greedy
log.info("round %d\t loss: %s\t num: %s\t reward: %s\t value: %s" % (k, loss, num, reward, value))
print("round time %.2f total time %.2f\n" % (time.time() - tic, time.time() - start))
# save models
if (k + 1) % args.save_every == 0 and args.train:
print("save model... ")
for model in models:
model.save(savedir, k)
# send quit command
for model in models:
model.quit()
| 9,203 | 32.714286 | 109 | py |
MAgent | MAgent-master/examples/train_tiger.py | """
Double attack: tigers get a reward when two of them attack the same deer
"""
import argparse
import time
import logging as log
import numpy as np
import magent
from magent.builtin.rule_model import RandomActor
def generate_map(env, map_size, handles):
env.add_walls(method="random", n=map_size*map_size*0.04)
env.add_agents(handles[0], method="random", n=map_size*map_size*0.05)
env.add_agents(handles[1], method="random", n=map_size*map_size*0.01)
def play_a_round(env, map_size, handles, models, print_every, train_id=1, step_batch_size=None, render=False, eps=None):
env.reset()
generate_map(env, map_size, handles)
step_ct = 0
total_reward = 0
done = False
total_loss = value = 0
n = len(handles)
obs = [[] for _ in range(n)]
ids = [[] for _ in range(n)]
acts = [[] for _ in range(n)]
nums = [0 for _ in range(n)]
sample_buffer = magent.utility.EpisodesBuffer(10000)
n_transition = 0
print("===== sample =====")
print("eps %s" % eps)
start_time = time.time()
while not done:
# take actions for every model
for i in range(n):
            if i == 0:
                # deer are driven by RandomActor, which ignores observations,
                # so skip the expensive get_observation call and pass placeholders
                temp_num = env.get_num(handles[i])
                obs[i] = (np.empty(temp_num), np.empty(temp_num))
else:
obs[i] = env.get_observation(handles[i])
ids[i] = env.get_agent_id(handles[i])
acts[i] = models[i].infer_action(obs[i], ids[i], policy='e_greedy', eps=eps)
env.set_action(handles[i], acts[i])
# simulate one step
done = env.step()
# sample
reward = 0
if train_id != -1:
rewards = env.get_reward(handles[train_id])
alives = env.get_alive(handles[train_id])
total_reward += sum(rewards)
sample_buffer.record_step(ids[train_id], obs[train_id], acts[train_id], rewards, alives)
reward = sum(rewards)
# render
if render:
env.render()
# clear dead agents
env.clear_dead()
# stats info
for i in range(n):
nums[i] = env.get_num(handles[i])
n_transition += nums[train_id]
if step_ct % print_every == 0:
print("step %3d, deer: %5d, tiger: %5d, train_id: %d, reward: %.2f, total_reward: %.2f " %
(step_ct, nums[0], nums[1], train_id, reward, total_reward))
step_ct += 1
if step_ct > 1000:
break
if step_batch_size and n_transition > step_batch_size and train_id != -1:
total_loss, value = models[train_id].train(sample_buffer, 500)
sample_buffer.reset()
n_transition = 0
sample_time = time.time() - start_time
print("steps: %d, total time: %.2f, step average %.2f" % (step_ct, sample_time, sample_time / step_ct))
# train
if train_id != -1:
print("===== train =====")
start_time = time.time()
total_loss, value = models[train_id].train(sample_buffer)
train_time = time.time() - start_time
print("train_time %.2f" % train_time)
return total_loss, total_reward, value
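# Illustrative sketch (unused): a plausible reading of
# magent.utility.linear_decay(k, 10, 0.1) as used in the main loop below --
# epsilon falls linearly from 1.0 to 0.1 over the first 10 rounds and then
# stays flat. This is an assumption about the helper, not its actual source.
def linear_decay_sketch(round_k, n_rounds=10, final_eps=0.1, initial_eps=1.0):
    if round_k >= n_rounds:
        return final_eps
    return initial_eps + (final_eps - initial_eps) * round_k / float(n_rounds)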
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--save_every", type=int, default=5)
parser.add_argument("--n_round", type=int, default=200)
parser.add_argument("--render", action="store_true")
parser.add_argument("--load_from", type=int)
parser.add_argument("--train", action="store_true")
parser.add_argument("--greedy", action="store_true")
parser.add_argument("--map_size", type=int, default=500)
parser.add_argument("--name", type=str, default="tiger")
parser.add_argument('--alg', default='dqn', choices=['dqn', 'drqn', 'a2c'])
args = parser.parse_args()
# init the game
env = magent.GridWorld("double_attack", map_size=args.map_size)
env.set_render_dir("build/render")
    # two groups of animals
deer_handle, tiger_handle = env.get_handles()
# init two models
models = [
RandomActor(env, deer_handle, tiger_handle),
]
batch_size = 512
unroll = 8
if args.alg == 'dqn':
from magent.builtin.tf_model import DeepQNetwork
models.append(DeepQNetwork(env, tiger_handle, "tiger",
batch_size=batch_size,
memory_size=2 ** 20, learning_rate=4e-4))
step_batch_size = None
elif args.alg == 'drqn':
from magent.builtin.tf_model import DeepRecurrentQNetwork
models.append(DeepRecurrentQNetwork(env, tiger_handle, "tiger",
                                            batch_size=batch_size // unroll, unroll_step=unroll,
memory_size=20000, learning_rate=4e-4))
step_batch_size = None
elif args.alg == 'a2c':
from magent.builtin.mx_model import AdvantageActorCritic
        step_batch_size = int(10 * args.map_size * args.map_size * 0.01)
models.append(AdvantageActorCritic(env, tiger_handle, "tiger",
batch_size=step_batch_size,
learning_rate=1e-2))
else:
raise NotImplementedError
    # load checkpoint if specified
savedir = 'save_model'
if args.load_from is not None:
start_from = args.load_from
print("load ... %d" % start_from)
for model in models:
model.load(savedir, start_from)
else:
start_from = 0
# init logger
magent.utility.init_logger(args.name)
# print debug info
print(args)
print("view_size", env.get_view_space(tiger_handle))
# play
train_id = 1 if args.train else -1
start = time.time()
for k in range(start_from, start_from + args.n_round):
tic = time.time()
eps = magent.utility.linear_decay(k, 10, 0.1) if not args.greedy else 0
loss, reward, value = play_a_round(env, args.map_size, [deer_handle, tiger_handle], models,
step_batch_size=step_batch_size, train_id=train_id,
print_every=40, render=args.render,
eps=eps)
log.info("round %d\t loss: %s\t reward: %s\t value: %s" % (k, loss, reward, value))
print("round time %.2f total time %.2f\n" % (time.time() - tic, time.time() - start))
if (k + 1) % args.save_every == 0:
print("save model... ")
for model in models:
model.save(savedir, k)
| 6,540 | 33.792553 | 120 | py |
MAgent | MAgent-master/examples/train_single.py | """
Train a single model by self-play
"""
import argparse
import time
import os
import logging as log
import math
import numpy as np
import magent
def generate_map(env, map_size, handles):
""" generate a map, which consists of two squares of agents"""
width = height = map_size
init_num = map_size * map_size * 0.04
gap = 3
# left
n = init_num
side = int(math.sqrt(n)) * 2
pos = []
for x in range(width//2 - gap - side, width//2 - gap - side + side, 2):
for y in range((height - side)//2, (height - side)//2 + side, 2):
pos.append([x, y, 0])
env.add_agents(handles[0], method="custom", pos=pos)
# right
n = init_num
side = int(math.sqrt(n)) * 2
pos = []
for x in range(width//2 + gap, width//2 + gap + side, 2):
for y in range((height - side)//2, (height - side)//2 + side, 2):
pos.append([x, y, 0])
env.add_agents(handles[1], method="custom", pos=pos)
def play_a_round(env, map_size, handles, models, print_every, train=True, render=False, eps=None):
env.reset()
generate_map(env, map_size, handles)
step_ct = 0
done = False
n = len(handles)
obs = [[] for _ in range(n)]
ids = [[] for _ in range(n)]
acts = [[] for _ in range(n)]
nums = [env.get_num(handle) for handle in handles]
sample_buffer = magent.utility.EpisodesBuffer(capacity=1500)
total_reward = [0 for _ in range(n)]
print("===== sample =====")
print("eps %.2f number %s" % (eps, nums))
start_time = time.time()
while not done:
# take actions for every model
for i in range(n):
obs[i] = env.get_observation(handles[i])
ids[i] = env.get_agent_id(handles[i])
acts[i] = models[i].infer_action(obs[i], ids[i], 'e_greedy', eps=eps)
env.set_action(handles[i], acts[i])
# simulate one step
done = env.step()
# sample
step_reward = []
for i in range(n):
rewards = env.get_reward(handles[i])
if train:
alives = env.get_alive(handles[i])
sample_buffer.record_step(ids[i], obs[i], acts[i], rewards, alives)
s = sum(rewards)
step_reward.append(s)
total_reward[i] += s
# render
if render:
env.render()
# stat info
nums = [env.get_num(handle) for handle in handles]
# clear dead agents
env.clear_dead()
if step_ct % print_every == 0:
print("step %3d, nums: %s reward: %s, total_reward: %s " %
(step_ct, nums, np.around(step_reward, 2), np.around(total_reward, 2)))
step_ct += 1
if step_ct > 550:
break
sample_time = time.time() - start_time
print("steps: %d, total time: %.2f, step average %.2f" % (step_ct, sample_time, sample_time / step_ct))
# train
total_loss, value = 0, 0
if train:
print("===== train =====")
start_time = time.time()
total_loss, value = models[0].train(sample_buffer, 1000)
train_time = time.time() - start_time
print("train_time %.2f" % train_time)
def round_list(l): return [round(x, 2) for x in l]
return total_loss, nums, round_list(total_reward), value
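# Illustrative sketch (unused): the sampling contract assumed by play_a_round
# above -- EpisodesBuffer.record_step groups per-agent transitions by id, and
# model.train later consumes whole episodes from the buffer. A single-handle
# round condensed under that assumption:
def sample_one_step_sketch(env, handle, model, buf, eps):
    obs = env.get_observation(handle)
    ids = env.get_agent_id(handle)
    acts = model.infer_action(obs, ids, 'e_greedy', eps=eps)
    env.set_action(handle, acts)
    done = env.step()
    buf.record_step(ids, obs, acts, env.get_reward(handle), env.get_alive(handle))
    env.clear_dead()
    return done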
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--save_every", type=int, default=5)
parser.add_argument("--render_every", type=int, default=10)
parser.add_argument("--n_round", type=int, default=2000)
parser.add_argument("--render", action="store_true")
parser.add_argument("--load_from", type=int)
parser.add_argument("--train", action="store_true")
parser.add_argument("--map_size", type=int, default=125)
parser.add_argument("--greedy", action="store_true")
parser.add_argument("--name", type=str, default="battle")
parser.add_argument("--eval", action="store_true")
parser.add_argument('--alg', default='dqn', choices=['dqn', 'drqn'])
args = parser.parse_args()
# set logger
log.basicConfig(level=log.INFO, filename=args.name + '.log')
console = log.StreamHandler()
console.setLevel(log.INFO)
log.getLogger('').addHandler(console)
# init the game
env = magent.GridWorld("battle", map_size=args.map_size)
env.set_render_dir("build/render")
# two groups of agents
handles = env.get_handles()
# sample eval observation set
eval_obs = None
if args.eval:
print("sample eval set...")
env.reset()
generate_map(env, args.map_size, handles)
eval_obs = magent.utility.sample_observation(env, handles, 2048, 500)[0]
# init models
batch_size = 512
unroll_step = 8
target_update = 1200
train_freq = 5
models = []
if args.alg == 'dqn':
from magent.builtin.tf_model import DeepQNetwork
models.append(DeepQNetwork(env, handles[0], args.name,
batch_size=batch_size,
learning_rate=3e-4,
memory_size=2 ** 21, target_update=target_update,
train_freq=train_freq, eval_obs=eval_obs))
elif args.alg == 'drqn':
from magent.builtin.tf_model import DeepRecurrentQNetwork
models.append(DeepRecurrentQNetwork(env, handles[0], args.name,
learning_rate=3e-4,
                                            batch_size=batch_size // unroll_step, unroll_step=unroll_step,
memory_size=2 * 8 * 625, target_update=target_update,
train_freq=train_freq, eval_obs=eval_obs))
else:
# see train_against.py to know how to use a2c
raise NotImplementedError
    models.append(models[0])  # self-play: both sides share the same model instance
    # load checkpoint if specified
savedir = 'save_model'
if args.load_from is not None:
start_from = args.load_from
print("load ... %d" % start_from)
for model in models:
model.load(savedir, start_from)
else:
start_from = 0
# print debug info
print(args)
print("view_space", env.get_view_space(handles[0]))
print("feature_space", env.get_feature_space(handles[0]))
# play
start = time.time()
for k in range(start_from, start_from + args.n_round):
tic = time.time()
eps = magent.utility.piecewise_decay(k, [0, 700, 1400], [1, 0.2, 0.05]) if not args.greedy else 0
loss, num, reward, value = play_a_round(env, args.map_size, handles, models,
train=args.train, print_every=50,
render=args.render or (k+1) % args.render_every == 0,
eps=eps) # for e-greedy
log.info("round %d\t loss: %s\t num: %s\t reward: %s\t value: %s" % (k, loss, num, reward, value))
print("round time %.2f total time %.2f\n" % (time.time() - tic, time.time() - start))
# save models
if (k + 1) % args.save_every == 0 and args.train:
print("save model... ")
for model in models:
model.save(savedir, k)
| 7,237 | 33.303318 | 109 | py |
MAgent | MAgent-master/examples/show_battle_game.py | """
Interactive game; pygame is required.
Act like a general and dispatch your soldiers.
"""
import os
import magent
from magent.renderer import PyGameRenderer
from magent.renderer.server import BattleServer as Server
if __name__ == "__main__":
magent.utility.check_model('battle-game')
PyGameRenderer().start(Server())
| 332 | 19.8125 | 57 | py |
MAgent | MAgent-master/examples/train_pursuit.py | """
Pursuit: predators get reward when they attack prey.
"""
import argparse
import time
import logging as log
import numpy as np
import magent
from magent.builtin.tf_model import DeepQNetwork
def play_a_round(env, map_size, handles, models, print_every, train=True, render=False, eps=None):
env.reset()
env.add_walls(method="random", n=map_size * map_size * 0.03)
env.add_agents(handles[0], method="random", n=map_size * map_size * 0.0125)
env.add_agents(handles[1], method="random", n=map_size * map_size * 0.025)
step_ct = 0
done = False
n = len(handles)
obs = [[] for _ in range(n)]
ids = [[] for _ in range(n)]
acts = [[] for _ in range(n)]
nums = [env.get_num(handle) for handle in handles]
total_reward = [0 for _ in range(n)]
print("===== sample =====")
print("eps %s number %s" % (eps, nums))
start_time = time.time()
while not done:
# take actions for every model
for i in range(n):
obs[i] = env.get_observation(handles[i])
ids[i] = env.get_agent_id(handles[i])
# let models infer action in parallel (non-blocking)
models[i].infer_action(obs[i], ids[i], 'e_greedy', eps, block=False)
for i in range(n):
acts[i] = models[i].fetch_action() # fetch actions (blocking)
env.set_action(handles[i], acts[i])
# simulate one step
done = env.step()
# sample
step_reward = []
for i in range(n):
rewards = env.get_reward(handles[i])
if train:
alives = env.get_alive(handles[i])
# store samples in replay buffer (non-blocking)
models[i].sample_step(rewards, alives, block=False)
s = sum(rewards)
step_reward.append(s)
total_reward[i] += s
# render
if render:
env.render()
# clear dead agents
env.clear_dead()
        # check the status of the previously issued non-blocking sample_step() calls
if train:
for model in models:
model.check_done()
if step_ct % print_every == 0:
print("step %3d, reward: %s, total_reward: %s " %
(step_ct, np.around(step_reward, 2), np.around(total_reward, 2)))
step_ct += 1
if step_ct > 250:
break
sample_time = time.time() - start_time
print("steps: %d, total time: %.2f, step average %.2f" % (step_ct, sample_time, sample_time / step_ct))
# train
total_loss, value = [0 for _ in range(n)], [0 for _ in range(n)]
if train:
print("===== train =====")
start_time = time.time()
# train models in parallel
for i in range(n):
models[i].train(print_every=2000, block=False)
for i in range(n):
total_loss[i], value[i] = models[i].fetch_train()
train_time = time.time() - start_time
print("train_time %.2f" % train_time)
return magent.round(total_loss), magent.round(total_reward), magent.round(value)
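# Illustrative sketch (unused): the non-blocking ProcessingModel round-trip
# used above, condensed. A call with block=False dispatches work to the
# model's worker process; the matching fetch_action() later blocks until the
# result is ready, which lets several models run inference in parallel.
def parallel_infer_sketch(models, observations, agent_ids, eps):
    for model, obs, ids in zip(models, observations, agent_ids):
        model.infer_action(obs, ids, 'e_greedy', eps, block=False)  # dispatch
    return [model.fetch_action() for model in models]  # collect (blocking)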
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--save_every", type=int, default=2)
parser.add_argument("--render_every", type=int, default=10)
parser.add_argument("--n_round", type=int, default=500)
parser.add_argument("--render", action="store_true")
parser.add_argument("--load_from", type=int)
parser.add_argument("--train", action="store_true")
parser.add_argument("--map_size", type=int, default=1000)
parser.add_argument("--greedy", action="store_true")
parser.add_argument("--eval", action="store_true")
parser.add_argument("--name", type=str, default="pursuit")
args = parser.parse_args()
# set logger
magent.utility.init_logger(args.name)
# init the game
env = magent.GridWorld("pursuit", map_size=args.map_size)
env.set_render_dir("build/render")
# two groups of agents
handles = env.get_handles()
# load models
names = ["predator", "prey"]
models = []
for i in range(len(names)):
models.append(magent.ProcessingModel(
env, handles[i], names[i], 20000+i, 4000, DeepQNetwork,
batch_size=512, memory_size=2 ** 22,
target_update=1000, train_freq=4
))
    # load checkpoint if specified
savedir = 'save_model'
if args.load_from is not None:
start_from = args.load_from
print("load ... %d" % start_from)
for model in models:
model.load(savedir, start_from)
else:
start_from = 0
# print debug info
print(args)
print("view_space", env.get_view_space(handles[0]))
print("feature_space", env.get_feature_space(handles[0]))
# play
start = time.time()
for k in range(start_from, start_from + args.n_round):
tic = time.time()
eps = magent.utility.piecewise_decay(k, [0, 200, 400], [1, 0.2, 0.05]) if not args.greedy else 0
loss, reward, value = play_a_round(env, args.map_size, handles, models,
print_every=50, train=args.train,
render=args.render or (k+1) % args.render_every == 0,
eps=eps) # for e-greedy
log.info("round %d\t loss: %s\t reward: %s\t value: %s" % (k, loss, reward, value))
print("round time %.2f total time %.2f\n" % (time.time() - tic, time.time() - start))
if (k + 1) % args.save_every == 0 and args.train:
print("save model... ")
for model in models:
model.save(savedir, k)
# send quit command
for model in models:
model.quit()
| 5,713 | 32.22093 | 109 | py |
MAgent | MAgent-master/examples/show_arrange.py | """
Show the arrange demo; pygame is required.
Type messages and let the agents arrange themselves to form the characters.
"""
import os
import sys
import argparse
import magent
from magent.renderer import PyGameRenderer
from magent.renderer.server import ArrangeServer as Server
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=int, default=0, help="0: without maze, 1: adding a maze")
parser.add_argument("--mess", type=str, nargs="+", help="words you wanna print", required=True)
args = parser.parse_args()
magent.utility.check_model('arrange')
PyGameRenderer().start(Server(messages=args.mess, mode=args.mode), grid_size=3.5)
| 699 | 29.434783 | 99 | py |
MAgent | MAgent-master/examples/api_demo.py | """
First demo: shows the basic usage of the API
"""
import magent
# uses the TensorFlow model; swap in magent.builtin.mx_model for the MXNet version
from magent.builtin.tf_model import DeepQNetwork
if __name__ == "__main__":
map_size = 100
# init the game "pursuit" (config file are stored in python/magent/builtin/config/)
env = magent.GridWorld("pursuit", map_size=map_size)
env.set_render_dir("build/render")
# get group handles
predator, prey = env.get_handles()
# init env and agents
env.reset()
env.add_walls(method="random", n=map_size * map_size * 0.01)
env.add_agents(predator, method="random", n=map_size * map_size * 0.02)
env.add_agents(prey, method="random", n=map_size * map_size * 0.02)
# init two models
model1 = DeepQNetwork(env, predator, "predator")
model2 = DeepQNetwork(env, prey, "prey")
# load trained model
model1.load("data/pursuit_model")
model2.load("data/pursuit_model")
done = False
step_ct = 0
print("nums: %d vs %d" % (env.get_num(predator), env.get_num(prey)))
while not done:
        # take actions for predators
obs_1 = env.get_observation(predator)
ids_1 = env.get_agent_id(predator)
acts_1 = model1.infer_action(obs_1, ids_1)
env.set_action(predator, acts_1)
        # take actions for prey
        obs_2 = env.get_observation(prey)
        ids_2 = env.get_agent_id(prey)
        acts_2 = model2.infer_action(obs_2, ids_2)
env.set_action(prey, acts_2)
# simulate one step
done = env.step()
# render
env.render()
# get reward
reward = [sum(env.get_reward(predator)), sum(env.get_reward(prey))]
# clear dead agents
env.clear_dead()
# print info
if step_ct % 10 == 0:
print("step: %d\t predators' reward: %d\t preys' reward: %d" %
(step_ct, reward[0], reward[1]))
step_ct += 1
if step_ct > 250:
break
| 2,019 | 27.055556 | 88 | py |
MAgent | MAgent-master/examples/train_battle.py | """
Train battle, two models in two processes
"""
import argparse
import time
import logging as log
import math
import numpy as np
import magent
leftID, rightID = 0, 1
def generate_map(env, map_size, handles):
""" generate a map, which consists of two squares of agents"""
width = height = map_size
init_num = map_size * map_size * 0.04
gap = 3
global leftID, rightID
    leftID, rightID = rightID, leftID  # swap spawn sides every round so neither model overfits one side
# left
n = init_num
side = int(math.sqrt(n)) * 2
pos = []
for x in range(width//2 - gap - side, width//2 - gap - side + side, 2):
for y in range((height - side)//2, (height - side)//2 + side, 2):
pos.append([x, y, 0])
env.add_agents(handles[leftID], method="custom", pos=pos)
# right
n = init_num
side = int(math.sqrt(n)) * 2
pos = []
for x in range(width//2 + gap, width//2 + gap + side, 2):
for y in range((height - side)//2, (height - side)//2 + side, 2):
pos.append([x, y, 0])
env.add_agents(handles[rightID], method="custom", pos=pos)
def play_a_round(env, map_size, handles, models, print_every, train=True, render=False, eps=None):
"""play a ground and train"""
env.reset()
generate_map(env, map_size, handles)
step_ct = 0
done = False
n = len(handles)
obs = [[] for _ in range(n)]
ids = [[] for _ in range(n)]
acts = [[] for _ in range(n)]
nums = [env.get_num(handle) for handle in handles]
total_reward = [0 for _ in range(n)]
print("===== sample =====")
print("eps %.2f number %s" % (eps, nums))
start_time = time.time()
while not done:
# take actions for every model
for i in range(n):
obs[i] = env.get_observation(handles[i])
ids[i] = env.get_agent_id(handles[i])
# let models infer action in parallel (non-blocking)
models[i].infer_action(obs[i], ids[i], 'e_greedy', eps, block=False)
for i in range(n):
acts[i] = models[i].fetch_action() # fetch actions (blocking)
env.set_action(handles[i], acts[i])
# simulate one step
done = env.step()
# sample
step_reward = []
for i in range(n):
rewards = env.get_reward(handles[i])
if train:
alives = env.get_alive(handles[i])
# store samples in replay buffer (non-blocking)
models[i].sample_step(rewards, alives, block=False)
s = sum(rewards)
step_reward.append(s)
total_reward[i] += s
# render
if render:
env.render()
# stat info
nums = [env.get_num(handle) for handle in handles]
# clear dead agents
env.clear_dead()
        # check the status of the previously issued non-blocking sample_step() calls
if args.train:
for model in models:
model.check_done()
if step_ct % print_every == 0:
print("step %3d, nums: %s reward: %s, total_reward: %s " %
(step_ct, nums, np.around(step_reward, 2), np.around(total_reward, 2)))
step_ct += 1
if step_ct > 550:
break
sample_time = time.time() - start_time
print("steps: %d, total time: %.2f, step average %.2f" % (step_ct, sample_time, sample_time / step_ct))
# train
total_loss, value = [0 for _ in range(n)], [0 for _ in range(n)]
if train:
print("===== train =====")
start_time = time.time()
# train models in parallel
for i in range(n):
models[i].train(print_every=1000, block=False)
for i in range(n):
total_loss[i], value[i] = models[i].fetch_train()
train_time = time.time() - start_time
print("train_time %.2f" % train_time)
def round_list(l): return [round(x, 2) for x in l]
return round_list(total_loss), nums, round_list(total_reward), round_list(value)
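# Illustrative sketch (unused): a plausible reading of
# magent.utility.piecewise_decay(k, [0, 700, 1400], [1, 0.2, 0.05]) as used
# below -- epsilon is interpolated linearly between breakpoint rounds and held
# at the last value afterwards. An assumption about the helper, not its source.
def piecewise_decay_sketch(k, breakpoints, values):
    for i in range(len(breakpoints) - 1):
        if k < breakpoints[i + 1]:
            frac = (k - breakpoints[i]) / float(breakpoints[i + 1] - breakpoints[i])
            return values[i] + (values[i + 1] - values[i]) * frac
    return values[-1]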
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--save_every", type=int, default=5)
parser.add_argument("--render_every", type=int, default=10)
parser.add_argument("--n_round", type=int, default=2000)
parser.add_argument("--render", action="store_true")
parser.add_argument("--load_from", type=int)
parser.add_argument("--train", action="store_true")
parser.add_argument("--map_size", type=int, default=125)
parser.add_argument("--greedy", action="store_true")
parser.add_argument("--name", type=str, default="battle")
parser.add_argument("--eval", action="store_true")
parser.add_argument('--alg', default='dqn', choices=['dqn', 'drqn', 'a2c'])
args = parser.parse_args()
# set logger
magent.utility.init_logger(args.name)
# init the game
env = magent.GridWorld("battle", map_size=args.map_size)
env.set_render_dir("build/render")
# two groups of agents
handles = env.get_handles()
# sample eval observation set
eval_obs = [None, None]
if args.eval:
print("sample eval set...")
env.reset()
generate_map(env, args.map_size, handles)
for i in range(len(handles)):
            eval_obs[i] = magent.utility.sample_observation(env, handles, 2048, 500)[i]
# load models
batch_size = 256
unroll_step = 8
target_update = 1200
train_freq = 5
if args.alg == 'dqn':
from magent.builtin.tf_model import DeepQNetwork
RLModel = DeepQNetwork
base_args = {'batch_size': batch_size,
'memory_size': 2 ** 20, 'learning_rate': 1e-4,
'target_update': target_update, 'train_freq': train_freq}
elif args.alg == 'drqn':
from magent.builtin.tf_model import DeepRecurrentQNetwork
RLModel = DeepRecurrentQNetwork
        base_args = {'batch_size': batch_size // unroll_step, 'unroll_step': unroll_step,
'memory_size': 8 * 625, 'learning_rate': 1e-4,
'target_update': target_update, 'train_freq': train_freq}
elif args.alg == 'a2c':
# see train_against.py to know how to use a2c
raise NotImplementedError
# init models
names = [args.name + "-l", args.name + "-r"]
models = []
for i in range(len(names)):
model_args = {'eval_obs': eval_obs[i]}
model_args.update(base_args)
models.append(magent.ProcessingModel(env, handles[i], names[i], 20000+i, 1000, RLModel, **model_args))
    # load checkpoint if specified
savedir = 'save_model'
if args.load_from is not None:
start_from = args.load_from
print("load ... %d" % start_from)
for model in models:
model.load(savedir, start_from)
else:
start_from = 0
# print state info
print(args)
print("view_space", env.get_view_space(handles[0]))
print("feature_space", env.get_feature_space(handles[0]))
# play
start = time.time()
for k in range(start_from, start_from + args.n_round):
tic = time.time()
eps = magent.utility.piecewise_decay(k, [0, 700, 1400], [1, 0.2, 0.05]) if not args.greedy else 0
loss, num, reward, value = play_a_round(env, args.map_size, handles, models,
train=args.train, print_every=50,
render=args.render or (k+1) % args.render_every == 0,
eps=eps) # for e-greedy
log.info("round %d\t loss: %s\t num: %s\t reward: %s\t value: %s" % (k, loss, num, reward, value))
print("round time %.2f total time %.2f\n" % (time.time() - tic, time.time() - start))
# save models
if (k + 1) % args.save_every == 0 and args.train:
print("save model... ")
for model in models:
model.save(savedir, k)
# send quit command
for model in models:
model.quit()
| 7,958 | 32.868085 | 110 | py |
MAgent | MAgent-master/examples/train_gather.py | """
Train agents to gather food
"""
import argparse
import logging as log
import time
import magent
from magent.builtin.mx_model import DeepQNetwork as RLModel
# change this line to magent.builtin.tf_model to use tensorflow
def load_config(size):
gw = magent.gridworld
cfg = gw.Config()
cfg.set({"map_width": size, "map_height": size})
cfg.set({"minimap_mode": True})
agent = cfg.register_agent_type(
name="agent",
attr={'width': 1, 'length': 1, 'hp': 3, 'speed': 3,
'view_range': gw.CircleRange(7), 'attack_range': gw.CircleRange(1),
'damage': 6, 'step_recover': 0,
'step_reward': -0.01, 'dead_penalty': -1, 'attack_penalty': -0.1,
'attack_in_group': 1})
food = cfg.register_agent_type(
name='food',
attr={'width': 1, 'length': 1, 'hp': 25, 'speed': 0,
'view_range': gw.CircleRange(1), 'attack_range': gw.CircleRange(0),
'kill_reward': 5})
g_f = cfg.add_group(food)
g_s = cfg.add_group(agent)
a = gw.AgentSymbol(g_s, index='any')
b = gw.AgentSymbol(g_f, index='any')
cfg.add_reward_rule(gw.Event(a, 'attack', b), receiver=a, value=0.5)
return cfg
def generate_map(env, map_size, food_handle, handles):
center_x, center_y = map_size // 2, map_size // 2
def add_square(pos, side, gap):
side = int(side)
for x in range(center_x - side//2, center_x + side//2 + 1, gap):
pos.append([x, center_y - side//2])
pos.append([x, center_y + side//2])
for y in range(center_y - side//2, center_y + side//2 + 1, gap):
pos.append([center_x - side//2, y])
pos.append([center_x + side//2, y])
# agent
pos = []
add_square(pos, map_size * 0.9, 3)
add_square(pos, map_size * 0.8, 4)
add_square(pos, map_size * 0.7, 6)
env.add_agents(handles[0], method="custom", pos=pos)
# food
pos = []
add_square(pos, map_size * 0.65, 10)
add_square(pos, map_size * 0.6, 10)
add_square(pos, map_size * 0.55, 10)
add_square(pos, map_size * 0.5, 4)
add_square(pos, map_size * 0.45, 3)
add_square(pos, map_size * 0.4, 1)
add_square(pos, map_size * 0.3, 1)
add_square(pos, map_size * 0.3 - 2, 1)
add_square(pos, map_size * 0.3 - 4, 1)
add_square(pos, map_size * 0.3 - 6, 1)
env.add_agents(food_handle, method="custom", pos=pos)
# legend
legend = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
[1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,],
[1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,],
[1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0,],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0,],
[1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0,],
[1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0,],
[1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0,],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
]
org = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
]
def draw(base_x, base_y, scale, data):
w, h = len(data), len(data[0])
pos = []
for i in range(w):
for j in range(h):
if data[i][j] == 1:
start_x = i * scale + base_x
start_y = j * scale + base_y
for x in range(start_x, start_x + scale):
for y in range(start_y, start_y + scale):
pos.append([y, x])
env.add_agents(food_handle, method="custom", pos=pos)
scale = 1
w, h = len(legend), len(legend[0])
offset = -3
draw(offset + map_size // 2 - w // 2 * scale, map_size // 2 - h // 2 * scale, scale, legend)
draw(offset + map_size // 2 - w // 2 * scale + len(legend), map_size // 2 - h // 2 * scale, scale, org)
def play_a_round(env, map_size, food_handle, handles, models, train_id=-1,
print_every=10, record=False, render=False, eps=None):
env.reset()
generate_map(env, map_size, food_handle, handles)
step_ct = 0
total_reward = 0
done = False
pos_reward_ct = set()
n = len(handles)
obs = [None for _ in range(n)]
ids = [None for _ in range(n)]
acts = [None for _ in range(n)]
nums = [env.get_num(handle) for handle in handles]
sample_buffer = magent.utility.EpisodesBuffer(capacity=5000)
print("===== sample =====")
print("eps %s number %s" % (eps, nums))
start_time = time.time()
while not done:
# take actions for every model
for i in range(n):
obs[i] = env.get_observation(handles[i])
ids[i] = env.get_agent_id(handles[i])
acts[i] = models[i].infer_action(obs[i], ids[i], policy='e_greedy', eps=eps)
env.set_action(handles[i], acts[i])
# simulate one step
done = env.step()
# sample
rewards = env.get_reward(handles[train_id])
step_reward = 0
if train_id != -1:
alives = env.get_alive(handles[train_id])
total_reward += sum(rewards)
sample_buffer.record_step(ids[train_id], obs[train_id], acts[train_id], rewards, alives)
step_reward = sum(rewards)
# render
if render:
env.render()
for id, r in zip(ids[0], rewards):
if r > 0.05 and id not in pos_reward_ct:
pos_reward_ct.add(id)
# clear dead agents
env.clear_dead()
# stats info
for i in range(n):
nums[i] = env.get_num(handles[i])
food_num = env.get_num(food_handle)
if step_ct % print_every == 0:
print("step %3d, train %d, num %s, reward %.2f, total_reward: %.2f, non_zero: %d" %
(step_ct, train_id, [food_num] + nums, step_reward, total_reward, len(pos_reward_ct)))
step_ct += 1
if step_ct > 350:
break
sample_time = time.time() - start_time
print("steps: %d, total time: %.2f, step average %.2f" % (step_ct, sample_time, sample_time / step_ct))
if record:
with open("reward-hunger.txt", "a") as fout:
fout.write(str(nums[0]) + "\n")
# train
total_loss = value = 0
if train_id != -1:
print("===== train =====")
start_time = time.time()
total_loss, value = models[train_id].train(sample_buffer, print_every=250)
train_time = time.time() - start_time
print("train_time %.2f" % train_time)
return total_loss, total_reward, value, len(pos_reward_ct)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--save_every", type=int, default=2)
parser.add_argument("--render_every", type=int, default=10)
parser.add_argument("--n_round", type=int, default=1500)
parser.add_argument("--render", action='store_true')
parser.add_argument("--load_from", type=int)
parser.add_argument("--train", action="store_true")
parser.add_argument("--print_every", type=int, default=100)
parser.add_argument("--map_size", type=int, default=200)
parser.add_argument("--greedy", action="store_true")
parser.add_argument("--name", type=str, default="gather")
parser.add_argument("--record", action="store_true")
parser.add_argument("--eval", action="store_true")
args = parser.parse_args()
# set logger
log.basicConfig(level=log.INFO, filename=args.name + '.log')
console = log.StreamHandler()
console.setLevel(log.INFO)
log.getLogger('').addHandler(console)
# init env
env = magent.GridWorld(load_config(size=args.map_size))
env.set_render_dir("build/render")
handles = env.get_handles()
food_handle = handles[0]
player_handles = handles[1:]
# sample eval observation set
eval_obs = None
if args.eval:
print("sample eval set...")
env.reset()
generate_map(env, args.map_size, food_handle, player_handles)
        eval_obs = magent.utility.sample_observation(env, player_handles, 2048, 500)[0]
# load models
models = [
RLModel(env, player_handles[0], args.name,
batch_size=512, memory_size=2 ** 19, target_update=1000,
train_freq=4, eval_obs=eval_obs)
]
# load saved model
save_dir = "save_model"
if args.load_from is not None:
start_from = args.load_from
print("load models...")
for model in models:
model.load(save_dir, start_from)
else:
start_from = 0
# print debug info
print(args)
print('view_space', env.get_view_space(player_handles[0]))
print('feature_space', env.get_feature_space(player_handles[0]))
print('view2attack', env.get_view2attack(player_handles[0]))
if args.record:
for k in range(4, 999 + 5, 5):
eps = 0
for model in models:
                model.load(save_dir, k)
play_a_round(env, args.map_size, food_handle, player_handles, models,
-1, record=True, render=False,
print_every=args.print_every, eps=eps)
else:
# play
start = time.time()
train_id = 0 if args.train else -1
for k in range(start_from, start_from + args.n_round):
tic = time.time()
eps = magent.utility.piecewise_decay(k, [0, 400, 1000], [1.0, 0.2, 0.05]) if not args.greedy else 0
loss, reward, value, pos_reward_ct = \
play_a_round(env, args.map_size, food_handle, player_handles, models,
train_id, record=False,
render=args.render or (k+1) % args.render_every == 0,
print_every=args.print_every, eps=eps)
log.info("round %d\t loss: %.3f\t reward: %.2f\t value: %.3f\t pos_reward_ct: %d"
% (k, loss, reward, value, pos_reward_ct))
print("round time %.2f total time %.2f\n" % (time.time() - tic, time.time() - start))
if (k + 1) % args.save_every == 0 and args.train:
print("save models...")
for model in models:
model.save(save_dir, k)
| 12,437 | 40.738255 | 151 | py |
MAgent | MAgent-master/examples/train_trans.py | """
Train agents to walk through walls while avoiding collisions
"""
import argparse
import time
import os
import logging as log
import math
import random
import numpy as np
import magent
from magent.builtin.tf_model import DeepQNetwork, DeepRecurrentQNetwork
def get_config(map_size):
gw = magent.gridworld
cfg = gw.Config()
cfg.set({"map_width": map_size * 2, "map_height": map_size})
cfg.set({"minimap_mode": True})
cfg.set({"embedding_size": 10})
agent = cfg.register_agent_type(
"agent",
{'width': 1, 'length': 1, 'hp': 10, 'speed': 1,
'view_range': gw.CircleRange(6),
'damage': 2, 'step_recover': 0.1,
'step_reward': -1,
})
g0 = cfg.add_group(agent)
return cfg
leftID, rightID = 0, 1
def generate_map(env, map_size, handles):
""" generate a map, which consists of two squares of agents and vertical lines"""
width = map_size * 2
height = map_size
    margin = int(map_size * 0.1)
line_num = 9
wall_width = 4
gap = 2
road_height = 2
road_num = 4
    init_num = int(margin * height * 0.8)
def random_add(x1, x2, y1, y2, n):
added = set()
ct = 0
while ct < n:
x = random.randint(x1, x2)
y = random.randint(y1, y2)
next = (x, y)
if next in added:
continue
added.add(next)
ct += 1
return list(added)
# left
pos = random_add(0, margin, 0, height, init_num)
env.add_agents(handles[leftID], method="custom", pos=pos)
# right
# pos = random_add(width - margin, width, 0, height, init_num)
# env.add_agents(handles[rightID], method="custom", pos=pos)
# wall
lines = set()
low, high = margin * 2 + wall_width, width - margin * 2 - wall_width
ct = 0
while ct < line_num:
next = random.randint(low, high)
collide = False
for j in range(-wall_width - gap, wall_width+gap + 1):
if next+j in lines:
collide = True
break
if collide:
continue
lines.add(next)
ct += 1
lines = list(lines)
walls = []
for item in lines:
road_skip = set()
for i in range(road_num):
road_start = random.randint(1, height-1 - road_height)
for j in range(road_height):
road_skip.add(road_start + j)
for i in range(height):
if i in road_skip:
continue
for j in range(-wall_width//2, wall_width//2 + 1):
walls.append((item+j, i))
env.add_walls(method="custom", pos=walls)
def play_a_round(env, map_size, handles, models, print_every, train=True, render=False, eps=None):
env.reset()
generate_map(env, map_size, handles)
step_ct = 0
done = False
n = len(handles)
obs = [[] for _ in range(n)]
ids = [[] for _ in range(n)]
acts = [[] for _ in range(n)]
nums = [env.get_num(handle) for handle in handles]
sample_buffer = magent.utility.EpisodesBuffer(capacity=1000)
total_reward = [0 for _ in range(n)]
print("===== sample =====")
print("eps %.2f number %s" % (eps, nums))
start_time = time.time()
while not done:
# take actions for every model
for i in range(n):
obs[i] = env.get_observation(handles[i])
ids[i] = env.get_agent_id(handles[i])
acts[i] = models[i].infer_action(obs[i], ids[i], 'e_greedy', eps=eps)
env.set_action(handles[i], acts[i])
# simulate one step
done = env.step()
# sample
step_reward = []
for i in range(n):
rewards = env.get_reward(handles[i])
if train:
alives = env.get_alive(handles[i])
sample_buffer.record_step(ids[i], obs[i], acts[i], rewards, alives)
s = sum(rewards)
step_reward.append(s)
total_reward[i] += s
# render
if render:
env.render()
# stat info
nums = [env.get_num(handle) for handle in handles]
# clear dead agents
env.clear_dead()
if step_ct % print_every == 0:
print("step %3d, nums: %s reward: %s, total_reward: %s " %
(step_ct, nums, np.around(step_reward, 2), np.around(total_reward, 2)))
step_ct += 1
if step_ct > 550:
break
sample_time = time.time() - start_time
print("steps: %d, total time: %.2f, step average %.2f" % (step_ct, sample_time, sample_time / step_ct))
# train
total_loss, value = 0, 0
if train:
print("===== train =====")
start_time = time.time()
total_loss, value = models[0].train(sample_buffer, 500)
train_time = time.time() - start_time
print("train_time %.2f" % train_time)
def round_list(l): return [round(x, 2) for x in l]
return total_loss, nums, round_list(total_reward), value
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--save_every", type=int, default=5)
parser.add_argument("--render_every", type=int, default=10)
parser.add_argument("--n_round", type=int, default=2000)
parser.add_argument("--render", action="store_true")
parser.add_argument("--load_from", type=int)
parser.add_argument("--train", action="store_true")
parser.add_argument("--map_size", type=int, default=60)
parser.add_argument("--greedy", action="store_true")
parser.add_argument("--name", type=str, default="battle")
parser.add_argument("--eval", action="store_true")
parser.add_argument('--alg', default='dqn', choices=['dqn', 'drqn', 'a2c'])
args = parser.parse_args()
# set logger
log.basicConfig(level=log.INFO, filename=args.name + '.log')
console = log.StreamHandler()
console.setLevel(log.INFO)
log.getLogger('').addHandler(console)
# init the game
env = magent.GridWorld(get_config(args.map_size))
env.set_render_dir("build/render")
# two groups of agents
names = [args.name + "-l", args.name + "-r"]
handles = env.get_handles()
# sample eval observation set
eval_obs = None
if args.eval:
print("sample eval set...")
env.reset()
generate_map(env, args.map_size, handles)
eval_obs = magent.utility.sample_observation(env, handles, 2048, 500)[0]
# init models
batch_size = 256
unroll_step = 8
target_update = 1000
train_freq = 5
models = []
if args.alg == 'dqn':
models.append(DeepQNetwork(env, handles[0], "selfplay",
batch_size=batch_size,
memory_size=2 ** 20, target_update=target_update,
train_freq=train_freq, eval_obs=eval_obs))
elif args.alg == 'drqn':
models.append(DeepRecurrentQNetwork(env, handles[0], "selfplay",
                                            batch_size=batch_size//unroll_step, unroll_step=unroll_step,
memory_size=2 * 8 * 625, target_update=target_update,
train_freq=train_freq, eval_obs=eval_obs))
else:
raise NotImplementedError
models.append(models[0])
    # load saved model if specified
savedir = 'save_model'
if args.load_from is not None:
start_from = args.load_from
print("load ... %d" % start_from)
for model in models:
model.load(savedir, start_from)
else:
start_from = 0
# print debug info
print(args)
print("view_space", env.get_view_space(handles[0]))
print("feature_space", env.get_feature_space(handles[0]))
# play
start = time.time()
for k in range(start_from, start_from + args.n_round):
tic = time.time()
eps = magent.utility.piecewise_decay(k, [0, 700, 1400], [1, 0.2, 0.05]) if not args.greedy else 0
loss, num, reward, value = play_a_round(env, args.map_size, handles, models,
train=args.train, print_every=50,
render=args.render or (k+1) % args.render_every == 0,
eps=eps) # for e-greedy
log.info("round %d\t loss: %s\t num: %s\t reward: %s\t value: %s" % (k, loss, num, reward, value))
print("round time %.2f total time %.2f\n" % (time.time() - tic, time.time() - start))
# save models
if (k + 1) % args.save_every == 0 and args.train:
print("save model... ")
for model in models:
model.save(savedir, k)
| 8,723 | 30.723636 | 109 | py |
MAgent | MAgent-master/python/magent/environment.py | """ base class for environment """
class Environment:
"""see subclass for detailed comment"""
def __init__(self):
pass
def reset(self):
pass
# ====== RUN ======
def get_observation(self, handle):
pass
def set_action(self, handle, actions):
pass
def step(self):
pass
def render(self):
pass
def render_next_file(self):
pass
def get_reward(self, handle):
pass
# ====== INFO ======
def get_num(self, handle):
pass
def get_action_space(self, handle):
pass
def get_view_space(self, handle):
pass
def get_feature_space(self, handle):
pass
| 702 | 14.977273 | 43 | py |
MAgent | MAgent-master/python/magent/utility.py | """ some utilities """
import math
import collections.abc
import numpy as np
import logging
import os
from magent.builtin.rule_model import RandomActor
class EpisodesBufferEntry:
"""Entry for episode buffer"""
def __init__(self):
self.views = []
self.features = []
self.actions = []
self.rewards = []
self.terminal = False
def append(self, view, feature, action, reward, alive):
self.views.append(view.copy())
self.features.append(feature.copy())
self.actions.append(action)
self.rewards.append(reward)
if not alive:
self.terminal = True
class EpisodesBuffer:
"""Replay buffer to store a whole episode for all agents
one entry for one agent
"""
def __init__(self, capacity):
self.buffer = {}
self.capacity = capacity
self.is_full = False
def record_step(self, ids, obs, acts, rewards, alives):
"""record transitions (s, a, r, terminal) in a step"""
buffer = self.buffer
index = np.random.permutation(len(ids))
if self.is_full: # extract loop invariant in else part
for i in range(len(ids)):
entry = buffer.get(ids[i])
if entry is None:
continue
entry.append(obs[0][i], obs[1][i], acts[i], rewards[i], alives[i])
else:
for i in range(len(ids)):
i = index[i]
entry = buffer.get(ids[i])
if entry is None:
if self.is_full:
continue
else:
entry = EpisodesBufferEntry()
buffer[ids[i]] = entry
if len(buffer) >= self.capacity:
self.is_full = True
entry.append(obs[0][i], obs[1][i], acts[i], rewards[i], alives[i])
def reset(self):
""" clear replay buffer """
self.buffer = {}
self.is_full = False
def episodes(self):
""" get episodes """
return self.buffer.values()
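# Typical use of EpisodesBuffer inside a sampling loop (sketch; see examples/train_*.py):
#   buf = EpisodesBuffer(capacity=1000)
#   # ... after env.step() for one group:
#   buf.record_step(ids, (views, features), acts, rewards, alives)
#   for episode in buf.episodes():
#       pass  # episode.views / .actions / .rewards hold one agent's trajectory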
# decay schedulers
def exponential_decay(now_step, total_step, final_value, rate):
"""exponential decay scheduler"""
decay = math.exp(math.log(final_value)/total_step ** rate)
return max(final_value, 1 * decay ** (now_step ** rate))
def linear_decay(now_step, total_step, final_value):
"""linear decay scheduler"""
decay = (1 - final_value) / total_step
return max(final_value, 1 - decay * now_step)
def piecewise_decay(now_step, anchor, anchor_value):
"""piecewise linear decay scheduler
Parameters
---------
now_step : int
current step
anchor : list of integer
step anchor
anchor_value: list of float
value at corresponding anchor
"""
i = 0
while i < len(anchor) and now_step >= anchor[i]:
i += 1
if i == len(anchor):
return anchor_value[-1]
else:
return anchor_value[i-1] + (now_step - anchor[i-1]) * \
((anchor_value[i] - anchor_value[i-1]) / (anchor[i] - anchor[i-1]))
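def _decay_demo():
    """Illustrative sketch (not used by the library) of the schedulers above:
    linear decay halfway through 100 steps, and piecewise decay between the
    400/1000 anchors."""
    assert abs(linear_decay(50, 100, 0.1) - 0.55) < 1e-6
    assert abs(piecewise_decay(500, [0, 400, 1000], [1.0, 0.2, 0.05]) - 0.175) < 1e-6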
# eval observation set generator
def sample_observation(env, handles, n_obs=-1, step=-1):
"""Sample observations by random actors.
These samples can be used for evaluation
Parameters
----------
env : environment
handles: list of handle
n_obs : int
number of observation
step : int
maximum step
Returns
-------
ret : list of raw observation
raw observation for every group
the format of raw observation is tuple(view, feature)
"""
models = [RandomActor(env, handle) for handle in handles]
n = len(handles)
views = [[] for _ in range(n)]
features = [[] for _ in range(n)]
done = False
step_ct = 0
while not done:
obs = [env.get_observation(handle) for handle in handles]
ids = [env.get_agent_id(handle) for handle in handles]
for i in range(n):
act = models[i].infer_action(obs[i], ids[i])
env.set_action(handles[i], act)
done = env.step()
env.clear_dead()
# record steps
for i in range(n):
views[i].append(obs[i][0])
            features[i].append(obs[i][1])
if step != -1 and step_ct > step:
break
if step_ct % 100 == 0:
print("sample step %d" % step_ct)
step_ct += 1
for i in range(n):
views[i] = np.array(views[i], dtype=np.float32).reshape((-1,) +
env.get_view_space(handles[i]))
features[i] = np.array(features[i], dtype=np.float32).reshape((-1,) +
env.get_feature_space(handles[i]))
if n_obs != -1:
for i in range(n):
views[i] = views[i][np.random.choice(np.arange(views[i].shape[0]), n_obs)]
features[i] = features[i][np.random.choice(np.arange(features[i].shape[0]), n_obs)]
ret = [(v, f) for v, f in zip(views, features)]
return ret
def init_logger(filename):
""" initialize logger config
Parameters
----------
filename : str
filename of the log
"""
logging.basicConfig(level=logging.INFO, filename=filename + ".log")
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
def rec_round(x, ndigits=2):
""" round x recursively
Parameters
----------
x: float, int, list, list of list, ...
variable to round, support many types
ndigits: int
precision in decimal digits
"""
    if isinstance(x, collections.abc.Iterable):
return [rec_round(item, ndigits) for item in x]
return round(x, ndigits)
def has_gpu():
""" check where has a nvidia gpu """
ret = os.popen("nvidia-smi -L 2>/dev/null").read()
return ret.find("GPU") != -1
def download_file(filename, url):
"""download url to filename"""
print("Download %s from %s..." % (filename, url))
ret = os.system("wget -O %s '%s'" % (filename, url))
if ret != 0:
print("ERROR: wget fails!")
print("If you are an OSX user, you can install wget by 'brew install wget' and retry.")
exit(-1)
else:
print("download done!")
def download_model(url):
"""download model from url"""
name = url.split('/')[-1]
name = os.path.join('data', name)
download_file(name, url)
    def do_command(cmd):
        print(cmd)
        os.system(cmd)
    do_command("tar xzf %s -C data" % name)
    do_command("rm %s" % name)
def check_model(name):
"""check whether a model is downloaded"""
infos = {
'against':
(('data/battle_model/battle/tfdqn_0.index',),
'https://raw.githubusercontent.com/merrymercy/merrymercy.github.io/master/_data/magent/against-0.tar.gz'),
'battle-game':
(("data/battle_model/trusty-battle-game-l/tfdqn_0.index",
"data/battle_model/trusty-battle-game-r/tfdqn_0.index"),
'https://raw.githubusercontent.com/merrymercy/merrymercy.github.io/master/_data/magent/battle_model.tar.gz'),
'arrange':
(('data/arrange_model/arrange/tfdqn_10.index',),
'https://raw.githubusercontent.com/merrymercy/merrymercy.github.io/master/_data/magent/arrange_game.tar.gz',)
}
if name not in infos:
raise RuntimeError("Unknown model name")
info = infos[name]
missing = False
for check in info[0]:
if not os.path.exists(check):
missing = True
if missing:
download_model(info[1])
class FontProvider:
"""provide pixel font"""
def __init__(self, filename):
data = []
# read raw
with open(filename) as fin:
for line in fin.readlines():
char = []
for x in line.split(','):
char.append(eval(x))
data.append(char)
height = 8
width = 8
# expand bit compress
expand_data = []
for char in data:
expand_char = [[0 for _ in range(width)] for _ in range(height)]
for i in range(width):
for j in range(height):
set = char[i] & (1 << j)
if set:
expand_char[i][j] = 1
expand_data.append(expand_char)
self.data = expand_data
self.width = width
self.height = height
def get(self, i):
if isinstance(i, int):
return self.data[i]
else:
return self.data[ord(i)]
| 8,715 | 27.48366 | 122 | py |
MAgent | MAgent-master/python/magent/gridworld.py | """gridworld interface"""
from __future__ import absolute_import
import ctypes
import os
import importlib
import numpy as np
from .c_lib import _LIB, as_float_c_array, as_int32_c_array
from .environment import Environment
class GridWorld(Environment):
# constant
OBS_INDEX_VIEW = 0
OBS_INDEX_HP = 1
def __init__(self, config, **kwargs):
"""
Parameters
----------
config: str or Config Object
if config is a string, then it is a name of builtin config,
builtin config are stored in python/magent/builtin/config
kwargs are the arguments to the config
if config is a Config Object, then parameters are stored in that object
"""
Environment.__init__(self)
# if is str, load built in configuration
if isinstance(config, str):
# built-in config are stored in python/magent/builtin/config
try:
demo_game = importlib.import_module('magent.builtin.config.' + config)
config = getattr(demo_game, 'get_config')(**kwargs)
except AttributeError:
raise BaseException('unknown built-in game "' + config + '"')
# create new game
game = ctypes.c_void_p()
_LIB.env_new_game(ctypes.byref(game), b"GridWorld")
self.game = game
# set global configuration
config_value_type = {
'map_width': int, 'map_height': int,
'food_mode': bool, 'turn_mode': bool, 'minimap_mode': bool,
'revive_mode': bool, 'goal_mode': bool,
'embedding_size': int,
'render_dir': str,
}
for key in config.config_dict:
value_type = config_value_type[key]
if value_type is int:
_LIB.env_config_game(self.game, key.encode("ascii"), ctypes.byref(ctypes.c_int(config.config_dict[key])))
elif value_type is bool:
_LIB.env_config_game(self.game, key.encode("ascii"), ctypes.byref(ctypes.c_bool(config.config_dict[key])))
elif value_type is float:
_LIB.env_config_game(self.game, key.encode("ascii"), ctypes.byref(ctypes.c_float(config.config_dict[key])))
elif value_type is str:
                _LIB.env_config_game(self.game, key.encode("ascii"), ctypes.c_char_p(config.config_dict[key].encode("ascii")))
# register agent types
for name in config.agent_type_dict:
type_args = config.agent_type_dict[name]
# special pre-process for view range and attack range
for key in [x for x in type_args.keys()]:
if key == "view_range":
val = type_args[key]
del type_args[key]
type_args["view_radius"] = val.radius
type_args["view_angle"] = val.angle
elif key == "attack_range":
val = type_args[key]
del type_args[key]
type_args["attack_radius"] = val.radius
type_args["attack_angle"] = val.angle
length = len(type_args)
keys = (ctypes.c_char_p * length)(*[key.encode("ascii") for key in type_args.keys()])
values = (ctypes.c_float * length)(*type_args.values())
_LIB.gridworld_register_agent_type(self.game, name.encode("ascii"), length, keys, values)
# serialize event expression, send to C++ engine
self._serialize_event_exp(config)
# init group handles
self.group_handles = []
for item in config.groups:
handle = ctypes.c_int32()
_LIB.gridworld_new_group(self.game, item.encode("ascii"), ctypes.byref(handle))
self.group_handles.append(handle)
# init observation buffer (for acceleration)
self._init_obs_buf()
# init view space, feature space, action space
self.view_space = {}
self.feature_space = {}
self.action_space = {}
buf = np.empty((3,), dtype=np.int32)
for handle in self.group_handles:
_LIB.env_get_info(self.game, handle, b"view_space",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
self.view_space[handle.value] = (buf[0], buf[1], buf[2])
_LIB.env_get_info(self.game, handle, b"feature_space",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
self.feature_space[handle.value] = (buf[0],)
_LIB.env_get_info(self.game, handle, b"action_space",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
self.action_space[handle.value] = (buf[0],)
def reset(self):
"""reset environment"""
_LIB.env_reset(self.game)
def add_walls(self, method, **kwargs):
"""add wall to environment
Parameters
----------
method: str
can be 'random' or 'custom'
if method is 'random', then kwargs["n"] is a int
if method is 'custom', then kwargs["pos"] is a list of coordination
Examples
--------
# add 1000 walls randomly
>>> env.add_walls(method="random", n=1000)
# add 3 walls to (1,2), (4,5) and (9, 8) in map
>>> env.add_walls(method="custom", pos=[(1,2), (4,5), (9,8)])
"""
# handle = -1 for walls
kwargs["dir"] = 0
self.add_agents(-1, method, **kwargs)
# ====== AGENT ======
def new_group(self, name):
"""register a new group into environment"""
handle = ctypes.c_int32()
_LIB.gridworld_new_group(self.game, ctypes.c_char_p(name.encode("ascii")), ctypes.byref(handle))
return handle
def add_agents(self, handle, method, **kwargs):
"""add agents to environment
Parameters
----------
handle: group handle
method: str
can be 'random' or 'custom'
if method is 'random', then kwargs["n"] is a int
if method is 'custom', then kwargs["pos"] is a list of coordination
Examples
--------
        # add 1000 agents randomly
>>> env.add_agents(handle, method="random", n=1000)
# add 3 agents to (1,2), (4,5) and (9, 8) in map
>>> env.add_agents(handle, method="custom", pos=[(1,2), (4,5), (9,8)])
"""
if method == "random":
_LIB.gridworld_add_agents(self.game, handle, int(kwargs["n"]), b"random", 0, 0, 0)
elif method == "custom":
n = len(kwargs["pos"])
pos = np.array(kwargs["pos"], dtype=np.int32)
if len(pos) <= 0:
return
if pos.shape[1] == 3: # if has dir
xs, ys, dirs = pos[:, 0], pos[:, 1], pos[:, 2]
else: # if do not has dir, use zero padding
xs, ys, dirs = pos[:, 0], pos[:, 1], np.zeros((n,), dtype=np.int32)
# copy again, to make these arrays continuous in memory
xs, ys, dirs = np.array(xs), np.array(ys), np.array(dirs)
_LIB.gridworld_add_agents(self.game, handle, n, b"custom", as_int32_c_array(xs),
as_int32_c_array(ys), as_int32_c_array(dirs))
elif method == "fill":
x, y = kwargs["pos"][0], kwargs["pos"][1]
width, height = kwargs["size"][0], kwargs["size"][1]
dir = kwargs.get("dir", np.zeros_like(x))
bind = np.array([x, y, width, height, dir], dtype=np.int32)
_LIB.gridworld_add_agents(self.game, handle, 0, b"fill", as_int32_c_array(bind),
0, 0, 0)
elif method == "maze":
# TODO: implement maze add
x_start, y_start, x_end, y_end = kwargs["pos"][0], kwargs["pos"][1], kwargs["pos"][2], kwargs["pos"][3]
thick = kwargs["pos"][4]
bind = np.array([x_start, y_start, x_end, y_end, thick], dtype=np.int32)
_LIB.gridworld_add_agents(self.game, handle, 0, b"maze", as_int32_c_array(bind),
0, 0, 0)
else:
print("Unknown type of position")
exit(-1)
# ====== RUN ======
def _get_obs_buf(self, group, key, shape, dtype):
"""get buffer to receive observation from c++ engine"""
obs_buf = self.obs_bufs[key]
if group in obs_buf:
ret = obs_buf[group]
if shape != ret.shape:
ret.resize(shape, refcheck=False)
else:
ret = obs_buf[group] = np.empty(shape=shape, dtype=dtype)
return ret
def _init_obs_buf(self):
"""init observation buffer"""
self.obs_bufs = []
self.obs_bufs.append({})
self.obs_bufs.append({})
def get_observation(self, handle):
""" get observation of a whole group
Parameters
----------
handle : group handle
Returns
-------
obs : tuple (views, features)
views is a numpy array, whose shape is n * view_width * view_height * n_channel
features is a numpy array, whose shape is n * feature_size
for agent i, (views[i], features[i]) is its observation at this step
"""
view_space = self.view_space[handle.value]
feature_space = self.feature_space[handle.value]
no = handle.value
n = self.get_num(handle)
view_buf = self._get_obs_buf(no, self.OBS_INDEX_VIEW, (n,) + view_space, np.float32)
feature_buf = self._get_obs_buf(no, self.OBS_INDEX_HP, (n,) + feature_space, np.float32)
bufs = (ctypes.POINTER(ctypes.c_float) * 2)()
bufs[0] = as_float_c_array(view_buf)
bufs[1] = as_float_c_array(feature_buf)
_LIB.env_get_observation(self.game, handle, bufs)
return view_buf, feature_buf
def set_action(self, handle, actions):
""" set actions for whole group
Parameters
----------
handle: group handle
actions: numpy array
the dtype of actions must be int32
"""
assert isinstance(actions, np.ndarray)
assert actions.dtype == np.int32
_LIB.env_set_action(self.game, handle, actions.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
def step(self):
"""simulation one step after set actions
Returns
-------
done: bool
whether the game is done
"""
done = ctypes.c_int32()
_LIB.env_step(self.game, ctypes.byref(done))
return bool(done)
def get_reward(self, handle):
""" get reward for a whole group
Returns
-------
rewards: numpy array (float32)
reward for all the agents in the group
"""
n = self.get_num(handle)
buf = np.empty((n,), dtype=np.float32)
_LIB.env_get_reward(self.game, handle,
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_float)))
return buf
def clear_dead(self):
""" clear dead agents in the engine
must be called after step()
"""
_LIB.gridworld_clear_dead(self.game)
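    # Typical per-step protocol for one group handle (sketch; see examples/train_*.py):
    #   obs  = env.get_observation(handle)
    #   ids  = env.get_agent_id(handle)
    #   env.set_action(handle, acts)      # acts: numpy array of int32
    #   done = env.step()
    #   rewards = env.get_reward(handle)
    #   env.clear_dead()                  # must be called after step()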
# ====== INFO ======
def get_handles(self):
""" get all group handles in the environment """
return self.group_handles
def get_num(self, handle):
""" get the number of agents in a group"""
num = ctypes.c_int32()
_LIB.env_get_info(self.game, handle, b'num', ctypes.byref(num))
return num.value
def get_action_space(self, handle):
"""get action space
Returns
-------
action_space : tuple
"""
return self.action_space[handle.value]
def get_view_space(self, handle):
"""get view space
Returns
-------
view_space : tuple
"""
return self.view_space[handle.value]
def get_feature_space(self, handle):
""" get feature space
Returns
-------
feature_space : tuple
"""
return self.feature_space[handle.value]
def get_agent_id(self, handle):
""" get agent id
Returns
-------
ids : numpy array (int32)
id of all the agents in the group
"""
n = self.get_num(handle)
buf = np.empty((n,), dtype=np.int32)
_LIB.env_get_info(self.game, handle, b"id",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
return buf
def get_alive(self, handle):
""" get alive status of agents in a group
Returns
-------
alives: numpy array (bool)
whether the agents are alive
"""
n = self.get_num(handle)
        buf = np.empty((n,), dtype=bool)
_LIB.env_get_info(self.game, handle, b"alive",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_bool)))
return buf
def get_pos(self, handle):
""" get position of agents in a group
Returns
-------
pos: numpy array (int)
the shape of pos is (n, 2)
"""
n = self.get_num(handle)
buf = np.empty((n, 2), dtype=np.int32)
_LIB.env_get_info(self.game, handle, b"pos",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
return buf
def get_mean_info(self, handle):
""" deprecated """
buf = np.empty(2 + self.action_space[handle.value][0], dtype=np.float32)
_LIB.env_get_info(self.game, handle, b"mean_info",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_float)))
return buf
def get_view2attack(self, handle):
""" get a matrix with the same size of view_range,
if element >= 0, then it means it is a attackable point, and the corresponding
action number is the value of that element
Returns
-------
attack_back: int
buf: numpy array
map attack action into view
"""
size = self.get_view_space(handle)[0:2]
buf = np.empty(size, dtype=np.int32)
attack_base = ctypes.c_int32()
_LIB.env_get_info(self.game, handle, b"view2attack",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
_LIB.env_get_info(self.game, handle, b"attack_base",
ctypes.byref(attack_base))
return attack_base.value, buf
def get_global_minimap(self, height, width):
""" compress global map into a minimap of given size
Parameters
----------
height: int
the height of minimap
width: int
the width of minimap
Returns
-------
minimap : numpy array
            the shape is (height, width, n_group)
"""
buf = np.empty((height, width, len(self.group_handles)), dtype=np.float32)
buf[0, 0, 0] = height
buf[0, 0, 1] = width
_LIB.env_get_info(self.game, -1, b"global_minimap",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_float)))
return buf
def set_seed(self, seed):
""" set random seed of the engine"""
_LIB.env_config_game(self.game, b"seed", ctypes.byref(ctypes.c_int(seed)))
# ====== RENDER ======
def set_render_dir(self, name):
""" set directory to save render file"""
if not os.path.exists(name):
os.mkdir(name)
_LIB.env_config_game(self.game, b"render_dir", name.encode("ascii"))
def render(self):
""" render a step """
_LIB.env_render(self.game)
def _get_groups_info(self):
""" private method, for interactive application"""
n = len(self.group_handles)
buf = np.empty((n, 5), dtype=np.int32)
_LIB.env_get_info(self.game, -1, b"groups_info",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
return buf
def _get_walls_info(self):
""" private method, for interactive application"""
n = 100 * 100
buf = np.empty((n, 2), dtype=np.int32)
_LIB.env_get_info(self.game, -1, b"walls_info",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
n = buf[0, 0] # the first line is the number of walls
return buf[1:1+n]
def _get_render_info(self, x_range, y_range):
""" private method, for interactive application"""
n = 0
for handle in self.group_handles:
n += self.get_num(handle)
buf = np.empty((n+1, 4), dtype=np.int32)
buf[0] = x_range[0], y_range[0], x_range[1], y_range[1]
_LIB.env_get_info(self.game, -1, b"render_window_info",
buf.ctypes.data_as(ctypes.POINTER((ctypes.c_int32))))
# the first line is for the number of agents in the window range
info_line = buf[0]
agent_ct, attack_event_ct = info_line[0], info_line[1]
buf = buf[1:1 + info_line[0]]
agent_info = {}
for item in buf:
agent_info[item[0]] = [item[1], item[2], item[3]]
buf = np.empty((attack_event_ct, 3), dtype=np.int32)
_LIB.env_get_info(self.game, -1, b"attack_event",
buf.ctypes.data_as(ctypes.POINTER((ctypes.c_int32))))
attack_event = buf
return agent_info, attack_event
def __del__(self):
_LIB.env_delete_game(self.game)
# ====== SPECIAL RULE ======
def set_goal(self, handle, method, *args, **kwargs):
""" deprecated """
if method == "random":
_LIB.gridworld_set_goal(self.game, handle, b"random", 0, 0)
else:
raise NotImplementedError
# ====== PRIVATE ======
def _serialize_event_exp(self, config):
"""serialize event expression and sent them to game engine"""
game = self.game
# collect agent symbol
symbol2int = {}
config.symbol_ct = 0
def collect_agent_symbol(node, config):
for item in node.inputs:
if isinstance(item, EventNode):
collect_agent_symbol(item, config)
elif isinstance(item, AgentSymbol):
if item not in symbol2int:
symbol2int[item] = config.symbol_ct
config.symbol_ct += 1
for rule in config.reward_rules:
on = rule[0]
receiver = rule[1]
for symbol in receiver:
if symbol not in symbol2int:
symbol2int[symbol] = config.symbol_ct
config.symbol_ct += 1
collect_agent_symbol(on, config)
# collect event node
event2int = {}
config.node_ct = 0
def collect_event_node(node, config):
if node not in event2int:
event2int[node] = config.node_ct
config.node_ct += 1
for item in node.inputs:
if isinstance(item, EventNode):
collect_event_node(item, config)
for rule in config.reward_rules:
collect_event_node(rule[0], config)
# send to C++ engine
for sym in symbol2int:
no = symbol2int[sym]
_LIB.gridworld_define_agent_symbol(game, no, sym.group, sym.index)
for event in event2int:
no = event2int[event]
inputs = np.zeros_like(event.inputs, dtype=np.int32)
for i, item in enumerate(event.inputs):
if isinstance(item, EventNode):
inputs[i] = event2int[item]
elif isinstance(item, AgentSymbol):
inputs[i] = symbol2int[item]
else:
inputs[i] = item
n_inputs = len(inputs)
_LIB.gridworld_define_event_node(game, no, event.op, as_int32_c_array(inputs), n_inputs)
for rule in config.reward_rules:
# rule = [on, receiver, value, terminal]
on = event2int[rule[0]]
receiver = np.zeros_like(rule[1], dtype=np.int32)
for i, item in enumerate(rule[1]):
receiver[i] = symbol2int[item]
if len(rule[2]) == 1 and rule[2][0] == 'auto':
                value = np.zeros(len(receiver), dtype=np.float32)
else:
value = np.array(rule[2], dtype=np.float32)
n_receiver = len(receiver)
_LIB.gridworld_add_reward_rule(game, on, as_int32_c_array(receiver),
as_float_c_array(value), n_receiver, rule[3])
'''
the following classes are for reward description
'''
class EventNode:
"""an AST node of the event expression"""
OP_AND = 0
OP_OR = 1
OP_NOT = 2
OP_KILL = 3
OP_AT = 4
OP_IN = 5
OP_COLLIDE = 6
OP_ATTACK = 7
OP_DIE = 8
OP_IN_A_LINE = 9
OP_ALIGN = 10
# can extend more operation below
def __init__(self):
# for non-leaf node
self.op = None
# for leaf node
self.predicate = None
self.inputs = []
def __call__(self, subject, predicate, *args):
node = EventNode()
node.predicate = predicate
if predicate == 'kill':
node.op = EventNode.OP_KILL
node.inputs = [subject, args[0]]
elif predicate == 'at':
node.op = EventNode.OP_AT
coor = args[0]
node.inputs = [subject, coor[0], coor[1]]
elif predicate == 'in':
node.op = EventNode.OP_IN
coor = args[0]
x1, y1 = min(coor[0][0], coor[1][0]), min(coor[0][1], coor[1][1])
x2, y2 = max(coor[0][0], coor[1][0]), max(coor[0][1], coor[1][1])
node.inputs = [subject, x1, y1, x2, y2]
elif predicate == 'attack':
node.op = EventNode.OP_ATTACK
node.inputs = [subject, args[0]]
elif predicate == 'collide':
node.op = EventNode.OP_COLLIDE
node.inputs = [subject, args[0]]
elif predicate == 'die':
node.op = EventNode.OP_DIE
node.inputs = [subject]
elif predicate == 'in_a_line':
node.op = EventNode.OP_IN_A_LINE
node.inputs = [subject]
elif predicate == 'align':
node.op = EventNode.OP_ALIGN
node.inputs = [subject]
else:
raise Exception("invalid predicate of event " + predicate)
return node
def __and__(self, other):
node = EventNode()
node.op = EventNode.OP_AND
node.inputs = [self, other]
return node
def __or__(self, other):
node = EventNode()
node.op = EventNode.OP_OR
node.inputs = [self, other]
return node
def __invert__(self):
node = EventNode()
node.op = EventNode.OP_NOT
node.inputs = [self]
return node
Event = EventNode()
class AgentSymbol:
"""symbol to represent some agents"""
def __init__(self, group, index):
""" define a agent symbol, it can be the object or subject of EventNode
group: group handle
it is the return value of cfg.add_group()
index: int or str
int: a deterministic integer id
str: can be 'all' or 'any', represents all or any agents in a group
"""
self.group = group if group is not None else -1
if index == 'any':
self.index = -1
elif index == 'all':
self.index = -2
else:
            assert isinstance(index, int), "index must be a deterministic int"
self.index = index
def __str__(self):
return 'agent(%d,%d)' % (self.group, self.index)
class Config:
"""configuration class of gridworld game"""
def __init__(self):
self.config_dict = {}
self.agent_type_dict = {}
self.groups = []
self.reward_rules = []
def set(self, args):
""" set parameters of global configuration
Parameters
----------
args : dict
key value pair of the configuration
"""
for key in args:
self.config_dict[key] = args[key]
def register_agent_type(self, name, attr):
""" register an agent type
Parameters
----------
name : str
name of the type (should be unique)
attr: dict
key value pair of the agent type
see notes below to know the available attributes
Notes
-----
height: int, height of agent body
width: int, width of agent body
speed: float, maximum speed, i.e. the radius of move circle of the agent
hp: float, maximum health point of the agent
view_range: gw.CircleRange or gw.SectorRange
damage: float, attack damage
step_recover: float, step recover of health point (can be negative)
        kill_supply: float, the hp gain for killing this type of agent
        step_reward: float, reward received in every step
        kill_reward: float, reward gained for killing this type of agent
        dead_penalty: float, reward received when dead
        attack_penalty: float, reward received when performing an attack (used to discourage attacking empty grids)
"""
if name in self.agent_type_dict:
raise Exception("type name %s already exists" % name)
self.agent_type_dict[name] = attr
return name
def add_group(self, agent_type):
""" add a group to the configuration
Returns
-------
group_handle : int
a handle for the new added group
"""
no = len(self.groups)
self.groups.append(agent_type)
return no
def add_reward_rule(self, on, receiver, value, terminal=False):
""" add a reward rule
        Note:
1. if the receiver is not a deterministic agent,
it must be one of the agents involved in the triggering event
Parameters
----------
on: Expr
a bool expression of the trigger event
receiver: (list of) AgentSymbol
receiver of this reward rule
value: (list of) float
value to assign
terminal: bool
whether this event will terminate the game
"""
if not (isinstance(receiver, tuple) or isinstance(receiver, list)):
            assert not (isinstance(value, tuple) or isinstance(value, list))
receiver = [receiver]
value = [value]
if len(receiver) != len(value):
raise Exception("the length of receiver and value should be equal")
self.reward_rules.append([on, receiver, value, terminal])
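def _reward_rule_demo():
    """Minimal usage sketch of the reward DSL above (the agent types and
    attribute values here are illustrative assumptions): reward a tiger 1
    whenever any tiger attacks any deer."""
    cfg = Config()
    cfg.set({"map_width": 20, "map_height": 20})
    deer = cfg.register_agent_type("deer", {"width": 1, "length": 1, "hp": 5, "speed": 1})
    tiger = cfg.register_agent_type("tiger", {"width": 1, "length": 1, "hp": 10, "speed": 1})
    g_deer = cfg.add_group(deer)
    g_tiger = cfg.add_group(tiger)
    a = AgentSymbol(g_tiger, index='any')
    b = AgentSymbol(g_deer, index='any')
    # trigger: any tiger attacks any deer; receiver: that tiger
    # EventNodes also compose with `&`, `|` and `~` for compound triggers
    cfg.add_reward_rule(Event(a, 'attack', b), receiver=a, value=1)
    return cfg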
class CircleRange:
def __init__(self, radius):
""" define a circle range for attack or view
Parameters
----------
radius : float
"""
self.radius = radius
self.angle = 360
def __str__(self):
return 'circle(%g)' % self.radius
class SectorRange:
def __init__(self, radius, angle):
""" define a sector range for attack or view
Parameters
----------
radius : float
angle : float
angle should be less than 180
"""
self.radius = radius
self.angle = angle
if self.angle >= 180:
raise Exception("the angle of a sector should be smaller than 180 degree")
def __str__(self):
return 'sector(%g, %g)' % (self.radius, self.angle)
| 27,843 | 33.761548 | 123 | py |
MAgent | MAgent-master/python/magent/model.py | """ base model classes"""
try:
import thread
except ImportError:
import _thread as thread
import multiprocessing
import multiprocessing.connection
import sys
import numpy as np
class BaseModel:
def __init__(self, env, handle, *args, **kwargs):
""" init
Parameters
----------
env: Environment
env
handle: GroupHandle
handle of this group, handles are returned by env.get_handles()
"""
pass
def infer_action(self, raw_obs, ids, *args, **kwargs):
""" infer action for a group of agents
Parameters
----------
raw_obs: tuple
raw_obs is a tuple of (view, feature)
view is a numpy array, its shape is n * view_width * view_height * n_channel
it contains the spatial local observation for all the agents
feature is a numpy array, its shape is n * feature_size
it contains the non-spatial feature for all the agents
ids: numpy array of int32
the unique id of every agents
args:
additional custom args
kwargs:
additional custom args
"""
pass
def train(self, sample_buffer, **kwargs):
""" feed new samples and train
Parameters
----------
sample_buffer: EpisodesBuffer
a buffer contains transitions of agents
Returns
-------
loss and estimated mean state value
"""
return 0, 0 # loss, mean value
def save(self, *args, **kwargs):
""" save the model """
pass
def load(self, *args, **kwargs):
""" load the model """
pass
class NDArrayPackage:
"""wrapper for transferring numpy arrays by bytes"""
def __init__(self, *args):
if isinstance(args[0], np.ndarray):
self.data = args
self.info = [(x.shape, x.dtype) for x in args]
else:
self.data = None
self.info = args[0]
self.max_len = (1 << 30) / 4
def send_to(self, conn, use_thread=False):
assert self.data is not None
def send_thread():
for x in self.data:
if np.prod(x.shape) > self.max_len:
seg = int(self.max_len // np.prod(x.shape[1:]))
for pt in range(0, len(x), seg):
conn.send_bytes(x[pt:pt+seg])
else:
conn.send_bytes(x)
if use_thread:
thread.start_new_thread(send_thread, ())
else:
send_thread()
def recv_from(self, conn):
bufs = []
for info in self.info:
buf = np.empty(shape=(int(np.prod(info[0])),), dtype=info[1])
item_size = int(np.prod(info[0][1:]))
if np.prod(info[0]) > self.max_len:
seg = int(self.max_len // item_size)
for pt in range(0, int(np.prod(info[0])), seg * item_size):
conn.recv_bytes_into(buf[pt:pt+seg * item_size])
else:
conn.recv_bytes_into(buf)
bufs.append(buf.reshape(info[0]))
return bufs
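def _ndarray_package_demo():
    """Usage sketch (illustrative helper, not used by the library): send an
    array through a multiprocessing Pipe. Shape/dtype info goes first via
    send(), then the raw bytes via send_to(); the receiver rebuilds the
    arrays with recv_from()."""
    parent, child = multiprocessing.Pipe()
    data = np.arange(12, dtype=np.float32).reshape(3, 4)
    pkg = NDArrayPackage(data)
    parent.send(pkg.info)  # metadata first
    pkg.send_to(parent)    # then the raw bytes
    arrays = NDArrayPackage(child.recv()).recv_from(child)
    assert (arrays[0] == data).all()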
class ProcessingModel(BaseModel):
"""
    start a sub-process to host a model,
use pipe or socket for communication
"""
def __init__(self, env, handle, name, port, sample_buffer_capacity=1000,
RLModel=None, **kwargs):
"""
Parameters
----------
env: environment
handle: group handle
name: str
name of the model (be used when store model)
port: int
port of socket or suffix of pipe
sample_buffer_capacity: int
the maximum number of samples (s,r,a,s') to collect in a game round
RLModel: BaseModel
the RL algorithm class
kwargs: dict
arguments for RLModel
"""
BaseModel.__init__(self, env, handle)
assert RLModel is not None
kwargs['env'] = env
kwargs['handle'] = handle
kwargs['name'] = name
addr = 'magent-pipe-' + str(port) # named pipe
# addr = ('localhost', port) # socket
proc = multiprocessing.Process(
target=model_client,
args=(addr, sample_buffer_capacity, RLModel, kwargs),
)
self.client_proc = proc
proc.start()
listener = multiprocessing.connection.Listener(addr)
self.conn = listener.accept()
def sample_step(self, rewards, alives, block=True):
"""record a step (should be followed by check_done)
Parameters
----------
block: bool
if it is True, the function call will block
if it is False, the caller must call check_done() afterward
to check/consume the return message
"""
package = NDArrayPackage(rewards, alives)
self.conn.send(["sample", package.info])
package.send_to(self.conn)
if block:
self.check_done()
def infer_action(self, raw_obs, ids, policy='e_greedy', eps=0, block=True):
""" infer action
Parameters
----------
policy: str
can be 'e_greedy' or 'greedy'
eps: float
used when policy is 'e_greedy'
block: bool
if it is True, the function call will block, and return actions
if it is False, the function call won't block, the caller
must call fetch_action() to get actions
Returns
-------
actions: numpy array (int32)
see above
"""
package = NDArrayPackage(raw_obs[0], raw_obs[1], ids)
self.conn.send(["act", policy, eps, package.info])
package.send_to(self.conn, use_thread=True)
if block:
info = self.conn.recv()
return NDArrayPackage(info).recv_from(self.conn)[0]
else:
return None
def fetch_action(self):
""" fetch actions , fetch action after calling infer_action(block=False)
Returns
-------
actions: numpy array (int32)
"""
info = self.conn.recv()
return NDArrayPackage(info).recv_from(self.conn)[0]
def train(self, print_every=5000, block=True):
""" train new data samples according to the model setting
Parameters
----------
print_every: int
print training log info every print_every batches
"""
self.conn.send(['train', print_every])
if block:
return self.fetch_train()
def fetch_train(self):
""" fetch result of train after calling train(block=False)
Returns
-------
loss: float
mean loss
value: float
mean state value
"""
return self.conn.recv()
def save(self, save_dir, epoch, block=True):
""" save model
Parameters
----------
block: bool
if it is True, the function call will block
if it is False, the caller must call check_done() afterward
to check/consume the return message
"""
self.conn.send(["save", save_dir, epoch])
if block:
self.check_done()
def load(self, save_dir, epoch, name=None, block=True):
""" load model
Parameters
----------
name: str
name of the model (set when stored name is not the same as self.name)
block: bool
if it is True, the function call will block
if it is False, the caller must call check_done() afterward
to check/consume the return message
"""
self.conn.send(["load", save_dir, epoch, name])
if block:
self.check_done()
def check_done(self):
""" check return message of sub processing """
assert self.conn.recv() == 'done'
def quit(self):
""" quit """
proc = self.client_proc
self.client_proc = None
self.conn.send(["quit"])
proc.join()
def __del__(self):
""" quit in destruction """
if self.client_proc is not None:
            self.quit()
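# Usage sketch for ProcessingModel (the RLModel argument is an assumption;
# any builtin model class such as magent.builtin.tf_model.DeepQNetwork fits):
#   model = ProcessingModel(env, handle, "battle", port=0, RLModel=SomeRLModel)
#   acts = model.infer_action(obs, ids, eps=0.1)  # after env.get_observation
#   model.sample_step(rewards, alives)            # after env.step()
#   loss, value = model.train()
#   model.save("save_model", epoch)
#   model.quit()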
def model_client(addr, sample_buffer_capacity, RLModel, model_args):
"""target function for sub-processing to host a model
Parameters
----------
addr: socket address
sample_buffer_capacity: int
the maximum number of samples (s,r,a,s') to collect in a game round
RLModel: BaseModel
the RL algorithm class
    model_args: dict
arguments to RLModel
"""
import magent.utility
model = RLModel(**model_args)
sample_buffer = magent.utility.EpisodesBuffer(capacity=sample_buffer_capacity)
conn = multiprocessing.connection.Client(addr)
while True:
cmd = conn.recv()
if cmd[0] == 'act':
policy = cmd[1]
eps = cmd[2]
array_info = cmd[3]
view, feature, ids = NDArrayPackage(array_info).recv_from(conn)
obs = (view, feature)
acts = model.infer_action(obs, ids, policy=policy, eps=eps)
package = NDArrayPackage(acts)
conn.send(package.info)
package.send_to(conn)
elif cmd[0] == 'train':
print_every = cmd[1]
total_loss, value = model.train(sample_buffer, print_every=print_every)
sample_buffer = magent.utility.EpisodesBuffer(sample_buffer_capacity)
conn.send((total_loss, value))
elif cmd[0] == 'sample':
array_info = cmd[1]
rewards, alives = NDArrayPackage(array_info).recv_from(conn)
sample_buffer.record_step(ids, obs, acts, rewards, alives)
conn.send("done")
elif cmd[0] == 'save':
savedir = cmd[1]
n_iter = cmd[2]
model.save(savedir, n_iter)
conn.send("done")
elif cmd[0] == 'load':
savedir = cmd[1]
n_iter = cmd[2]
name = cmd[3]
model.load(savedir, n_iter, name)
conn.send("done")
elif cmd[0] == 'quit':
break
else:
print("Error: Unknown command %s" % cmd[0])
break
| 10,411 | 28.91954 | 95 | py |
MAgent | MAgent-master/python/magent/discrete_snake.py | """ Deprecated!! """
from __future__ import absolute_import
import ctypes
import os
import importlib
import numpy as np
from .c_lib import _LIB, as_float_c_array, as_int32_c_array
from .environment import Environment
class DiscreteSnake(Environment):
"""deprecated"""
OBS_VIEW_INDEX = 0
OBS_FEATURE_INDEX = 1
def __init__(self, config, **kwargs):
Environment.__init__(self)
# for global settings
game = ctypes.c_void_p()
_LIB.env_new_game(ctypes.byref(game), b"DiscreteSnake")
self.game = game
config_value_type = {
'map_width': int, 'map_height': int,
'view_width': int, 'view_height': int,
'max_dead_penalty': float, 'corpse_value': float,
'embedding_size': int, 'total_resource': int,
'render_dir': str,
}
# config general setting
for key in config.config_dict:
print("discrete_snake.py L37 : ", key, config.config_dict[key])
value_type = config_value_type[key]
            if value_type is int:
                _LIB.env_config_game(self.game, key.encode("ascii"), ctypes.byref(ctypes.c_int(config.config_dict[key])))
            elif value_type is bool:
                _LIB.env_config_game(self.game, key.encode("ascii"), ctypes.byref(ctypes.c_bool(config.config_dict[key])))
            elif value_type is float:
                _LIB.env_config_game(self.game, key.encode("ascii"), ctypes.byref(ctypes.c_float(config.config_dict[key])))
            elif value_type is str:
                _LIB.env_config_game(self.game, key.encode("ascii"), ctypes.c_char_p(config.config_dict[key].encode("ascii")))
# init observation buffer (for acceleration)
self._init_obs_buf()
# init view size, feature size, action space
buf = np.empty((3,), dtype=np.int32)
_LIB.env_get_info(self.game, 0, b"view_space",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
self.view_space = [buf[0], buf[1], buf[2]]
_LIB.env_get_info(self.game, 0, b"feature_space",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
self.feature_space = buf[0]
_LIB.env_get_info(self.game, 0, b"action_space",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
self.action_space = buf[0]
def reset(self):
_LIB.env_reset(self.game)
def _add_object(self, obj_id, method, **kwargs):
if method == "random":
_LIB.discrete_snake_add_object(self.game, obj_id, int(kwargs["n"]), b"random", 0)
else:
print("unsupported type of method")
exit(-1)
def add_walls(self, method, **kwargs):
# handle = -1 for walls
self._add_object(-1, method, **kwargs)
def add_food(self, method, **kwargs):
# handles = -2 for food
self._add_object(-2, method, **kwargs)
def add_agent(self, method, *args, **kwargs):
self._add_object(0, method, **kwargs)
# ====== RUN ======
def _get_obs_buf(self, key, shape, dtype):
if self.obs_bufs[key] is None:
group_buf = self.obs_bufs[key] = [1] # (buf_id, buf1, buf2, ...)
group_buf.append(np.zeros(shape=shape, dtype=dtype))
group_buf.append(np.zeros(shape=shape, dtype=dtype))
ret = group_buf[1]
else:
group_buf = self.obs_bufs[key]
turn = group_buf[0]
ret = group_buf[turn]
if shape != ret.shape:
ret.resize(shape, refcheck=False)
            group_buf[0] = turn % 2 + 1  # toggle between the two buffers
return ret
def _init_obs_buf(self):
self.obs_bufs = [None, None]
def get_observation(self, handle=0):
view_space = self.view_space
feature_space = self.feature_space
n = self.get_num(handle)
view_buf = self._get_obs_buf(self.OBS_VIEW_INDEX, [n] + view_space, np.float32)
feature_buf = self._get_obs_buf(self.OBS_FEATURE_INDEX, (n, feature_space), np.float32)
bufs = (ctypes.POINTER(ctypes.c_float) * 2)()
bufs[0] = as_float_c_array(view_buf)
bufs[1] = as_float_c_array(feature_buf)
_LIB.env_get_observation(self.game, handle, bufs)
return view_buf, feature_buf
def set_action(self, handle, actions):
assert isinstance(actions, np.ndarray)
assert actions.dtype == np.int32
_LIB.env_set_action(self.game, handle, actions.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
def step(self):
done = ctypes.c_int32()
_LIB.env_step(self.game, ctypes.byref(done))
        return bool(done)
def get_reward(self, handle=0):
n = self.get_num(handle)
buf = np.empty((n,), dtype=np.float32)
_LIB.env_get_reward(self.game, handle,
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_float)))
return buf
def clear_dead(self):
_LIB.discrete_snake_clear_dead(self.game)
# ====== INFO ======
def get_num(self, handle=0):
num = ctypes.c_int32()
_LIB.env_get_info(self.game, handle, "num", ctypes.byref(num))
return num.value
def get_action_space(self, handle=0):
return self.action_space
def get_view_space(self, handle=0):
return self.view_space
def get_feature_space(self, handle=0):
return self.feature_space
def get_agent_id(self, handle=0):
n = self.get_num(handle)
buf = np.empty((n,), dtype=np.int32)
_LIB.env_get_info(self.game, handle, b"id",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
return buf
def get_head(self, handle=0):
n = self.get_num(handle)
buf = np.empty((n, 2), dtype=np.int32)
_LIB.env_get_info(self.game, handle, b"head",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
return buf
def get_alive(self, handle=0):
n = self.get_num(handle)
        buf = np.empty((n,), dtype=bool)
_LIB.env_get_info(self.game, handle, b"alive",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_bool)))
return buf
def get_length(self, handle=0):
n = self.get_num(handle)
buf = np.empty((n, ), dtype=np.int32)
_LIB.env_get_info(self.game, handle, b"length",
buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int)))
return buf
def get_food_num(self):
num = ctypes.c_int32()
_LIB.env_get_info(self.game, -2, "num", ctypes.byref(num)) # -2 for food
return num.value
# ====== RENDER ======
def set_render_dir(self, name):
if not os.path.exists(name):
os.mkdir(name)
_LIB.env_config_game(self.game, b"render_dir", name)
def render(self):
_LIB.env_render(self.game)
def render_next_file(self):
_LIB.env_render_next_file(self.game)
def __del__(self):
_LIB.env_delete_game(self.game)
class Config:
def __init__(self):
self.config_dict = {}
def set(self, args):
for key in args:
self.config_dict[key] = args[key] | 7,151 | 33.057143 | 107 | py |
MAgent | MAgent-master/python/magent/__init__.py | from . import model
from . import utility
from . import gridworld
# some alias
GridWorld = gridworld.GridWorld
ProcessingModel = model.ProcessingModel
round = utility.rec_round
| 178 | 18.888889 | 39 | py |
MAgent | MAgent-master/python/magent/c_lib.py | """ some utility for call C++ code"""
from __future__ import absolute_import
import os
import ctypes
import platform
import multiprocessing
def _load_lib():
""" Load library in build/lib. """
cur_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
lib_path = os.path.join(cur_path, "../../build/")
if platform.system() == 'Darwin':
path_to_so_file = os.path.join(lib_path, "libmagent.dylib")
elif platform.system() == 'Linux':
path_to_so_file = os.path.join(lib_path, "libmagent.so")
else:
raise BaseException("unsupported system: " + platform.system())
lib = ctypes.CDLL(path_to_so_file, ctypes.RTLD_GLOBAL)
return lib
def as_float_c_array(buf):
"""numpy to ctypes array"""
return buf.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
def as_int32_c_array(buf):
"""numpy to ctypes array"""
return buf.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
def as_bool_c_array(buf):
"""numpy to ctypes array"""
return buf.ctypes.data_as(ctypes.POINTER(ctypes.c_bool))
if 'OMP_NUM_THREADS' not in os.environ:
os.environ['OMP_NUM_THREADS'] = str(multiprocessing.cpu_count() // 2)
_LIB = _load_lib()
| 1,200 | 26.930233 | 77 | py |
MAgent | MAgent-master/python/magent/builtin/common.py | """Replay buffer for deep q network"""
import numpy as np
class ReplayBuffer:
"""a circular queue based on numpy array, supporting batch put and batch get"""
def __init__(self, shape, dtype=np.float32):
self.buffer = np.empty(shape=shape, dtype=dtype)
self.head = 0
self.capacity = len(self.buffer)
def put(self, data):
"""put data to
Parameters
----------
data: numpy array
data to add
"""
head = self.head
n = len(data)
if head + n <= self.capacity:
self.buffer[head:head+n] = data
self.head = (self.head + n) % self.capacity
else:
split = self.capacity - head
self.buffer[head:] = data[:split]
self.buffer[:n - split] = data[split:]
            self.head = n - split
return n
def get(self, index):
"""get items
Parameters
----------
index: int or numpy array
it can be any numpy supported index
"""
return self.buffer[index]
def clear(self):
"""clear replay buffer"""
self.head = 0
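def _replay_buffer_demo():
    """Wrap-around sketch (illustrative, not used by the library): a
    capacity-4 buffer receiving 3 + 3 items keeps circular order and moves
    the head past the boundary."""
    rb = ReplayBuffer(shape=(4,), dtype=np.int32)
    rb.put(np.array([1, 2, 3], dtype=np.int32))  # head -> 3
    rb.put(np.array([4, 5, 6], dtype=np.int32))  # wraps around: head -> 2
    assert rb.head == 2
    assert list(rb.buffer) == [5, 6, 3, 4]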
| 1,165 | 24.347826 | 83 | py |
MAgent | MAgent-master/python/magent/builtin/__init__.py | 0 | 0 | 0 | py |