Dataset columns:

- body_hash: string, length 64
- body: string, length 23 to 109k
- docstring: string, length 1 to 57k
- path: string, length 4 to 198
- name: string, length 1 to 115
- repository_name: string, length 7 to 111
- repository_stars: float64, range 0 to 191k
- lang: string, 1 class
- body_without_docstring: string, length 14 to 108k
- unified: string, length 45 to 133k

Sample rows:
f63819a020b8a0ee587a657e6ec7959aa6547157176d4d89f46bac9c9bdbebfb
path: pyxb/utils/fac.py
name: _set_state
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def _set_state(self, state, is_layer_change):
    """Internal state transition interface.

    @param state: the new destination state

    @param is_layer_change: C{True} iff the transition inducing
    the state change involves a layer change."""
    if (state == self.__state):
        return
    if is_layer_change:
        self.__subConfiguration = None
        self.__subAutomata = None
    self.__state = state
    if (is_layer_change and (state.subAutomata is not None)):
        assert (self.__subAutomata is None)
        self.__subAutomata = list(state.subAutomata)

236f984af1e142e9438ed82ccb19d9e41784a422a82e6dbddca70dfedcb4e6cd
path: pyxb/utils/fac.py
name: __get_subConfiguration
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __get_subConfiguration(self):
    """Reference to configuration being executed in a sub-automaton.

    C{None} if no sub-automaton is active, else a reference to a
    configuration that is being executed in a sub-automaton.

    Sub-configurations are used to match sub-terms in an
    L{unordered catenation<All>} term. A configuration may have
    at most one sub-configuration at a time, and the configuration
    will be removed and possibly replaced when the term being
    processed completes."""
    return self.__subConfiguration

7cb0fe6723b9cc773644e16711e393039387c470faf0fc0a0e1c27075c0038fa
path: pyxb/utils/fac.py
name: __get_superConfiguration
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __get_superConfiguration(self):
    """Reference to the configuration for which this is a
    sub-configuration.

    C{None} if no super-automaton is active, else a reference to a
    configuration that is being executed in a super-automaton.

    The super-configuration relation persists for the lifetime of
    the configuration."""
    return self.__superConfiguration

f30d6c09d9000ba41d604be6086323479c1a2a20ae77ce64001a941d71c9f0a7
path: pyxb/utils/fac.py
name: __get_subAutomata
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __get_subAutomata(self):
    """A set of automata that must be satisfied before the current state can complete.

    This is used in unordered catenation. Each sub-automaton
    represents a term in the catenation. When the configuration
    enters a state with sub-automata, a set containing references
    to those automata is assigned to this attribute.
    Subsequently, until all automata in the state are satisfied,
    transitions can only occur within an active sub-automaton, out
    of the active sub-automaton if it is in an accepting state,
    and into a new sub-automaton if no sub-automaton is active."""
    return self.__subAutomata

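The docstring above describes unordered catenation (the XSD C{all} group): every sub-term must be satisfied, but the sub-terms may be taken in any order, one at a time. A rough, self-contained sketch of that acceptance rule, assuming each sub-term consumes a contiguous run of symbols before the next one starts; all names below are illustrative and not part of pyxb's API:

from itertools import permutations

def matches_unordered_catenation(symbols, sub_matchers):
    # Each matcher is a predicate accepting one contiguous run of symbols.
    # The input matches if it can be split into one run per sub-term,
    # with the sub-terms taken in some order.
    def split_matches(syms, matchers):
        if not matchers:
            return not syms
        head, rest = matchers[0], matchers[1:]
        for cut in range(len(syms) + 1):
            if head(syms[:cut]) and split_matches(syms[cut:], rest):
                return True
        return False
    return any(split_matches(list(symbols), list(order))
               for order in permutations(sub_matchers))

# Example: an "all" group over the terms 'a' and 'bb'.
term_a = lambda run: run == ['a']
term_bb = lambda run: run == ['b', 'b']
assert matches_unordered_catenation('abb', [term_a, term_bb])
assert matches_unordered_catenation('bba', [term_a, term_bb])
assert not matches_unordered_catenation('ab', [term_a, term_bb])
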
dbe451630a8785ab397090bedd9e7b497273df121d6c77ca9d3862b8f7de2206
path: pyxb/utils/fac.py
name: makeLeaveAutomatonTransition
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def makeLeaveAutomatonTransition(self):
    """Create a transition back to the containing configuration.

    This is done when a configuration is in an accepting state and
    there are candidate transitions to other states that must be
    considered. The transition does not consume a symbol."""
    assert (self.__superConfiguration is not None)
    return Transition(self.__superConfiguration.__state, set(), layer_link=self.__superConfiguration)

0098ccdd66bbf8cc384807ffc56b97406c410ab13848b333869ddc0c351bede5
path: pyxb/utils/fac.py
name: leaveAutomaton
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def leaveAutomaton(self, sub_configuration):
    """Execute steps to leave a sub-automaton.

    @param sub_configuration: The configuration associated with
    the automaton that has completed.

    @return: C{self}"""
    assert (sub_configuration.__superConfiguration == self)
    self.__subConfiguration = None
    return self

a4ed8877f6bdb5f6008830c67e8319bd1b0cd9eb9eb4630fd5862a224394b2be
path: pyxb/utils/fac.py
name: enterAutomaton
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def enterAutomaton(self, automaton):
    """Execute steps to enter a new automaton.

    The new automaton is removed from the set of remaining
    automata for the current state, and a new configuration
    created. No transition is made in that new configuration.

    @param automaton: The automaton to be entered

    @return: The configuration that executes the new automaton as
    a sub-configuration of C{self}."""
    assert (self.__subConfiguration is None)
    assert (self.__subAutomata is not None)
    self.__subAutomata.remove(automaton)
    self.__subConfiguration = Configuration(automaton)
    self.__subConfiguration.__superConfiguration = self
    return self.__subConfiguration

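enterAutomaton and leaveAutomaton above form a pair: entering consumes one automaton from the pending set and links a child configuration; leaving clears that link once the child has finished. A toy version of that bookkeeping, with illustrative names rather than pyxb's classes:

class ToyConfiguration:
    def __init__(self, automaton, pending=()):
        self.automaton = automaton
        self.pending = list(pending)   # sub-automata not yet satisfied
        self.sub = None                # active child configuration, if any
        self.super_cfg = None

    def enter(self, automaton):
        # Claim one pending sub-automaton and run it in a linked child.
        assert self.sub is None and automaton in self.pending
        self.pending.remove(automaton)
        self.sub = ToyConfiguration(automaton)
        self.sub.super_cfg = self
        return self.sub

    def leave(self, sub):
        # Drop the link once the child has completed.
        assert sub.super_cfg is self
        self.sub = None
        return self

parent = ToyConfiguration('all-group', pending=['term-a', 'term-b'])
child = parent.enter('term-a')
parent.leave(child)
assert parent.pending == ['term-b'] and parent.sub is None
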
252891f73d430a91eb7a7305a931a0a676c8792931889ced938027e75d3c015b
path: pyxb/utils/fac.py
name: candidateTransitions
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def candidateTransitions(self, symbol=None):
    """Return list of viable transitions on C{symbol}

    The transitions that are structurally permitted from this
    state, in order, filtering out those transitions where the
    update instruction is not satisfied by the configuration
    counter values and optionally those for which the symbol does
    not match.

    @param symbol: A symbol through which a transition from this
    state is intended. A value of C{None} indicates that the set
    of transitions should ignore the symbol; candidates are still
    filtered based on the counter state of the configuration.

    @return: A list of L{Transition} instances permitted from the
    current configuration. If C{symbol} is not C{None},
    transitions that would not accept the symbol are excluded.
    Any transition that would require an unsatisfied counter
    update is also excluded. Non-deterministic automata may
    result in a list with multiple members."""
    fac = self.__automaton
    transitions = []
    if (symbol is None):
        match_filter = (lambda _xit: True)
    else:
        match_filter = (lambda _xit: _xit.consumingState().match(symbol))
    update_filter = (lambda _xit: _xit.satisfiedBy(self))
    if (self.__state is None):
        transitions.extend(fac.initialTransitions)
    elif ((self.__subConfiguration is not None) and (not self.__subConfiguration.isAccepting())):
        pass
    else:
        include_local = True
        if self.__subAutomata:
            (include_local, sub_initial) = self.__state.subAutomataInitialTransitions(self.__subAutomata)
            transitions.extend(map((lambda _xit: _xit.makeEnterAutomatonTransition()), sub_initial))
        if include_local:
            for xit in filter(update_filter, self.__state.transitionSet):
                if (xit.consumingState() is not None):
                    transitions.append(xit)
                else:
                    (_, sub_initial) = xit.destination.subAutomataInitialTransitions()
                    transitions.extend(map((lambda _xit: xit.chainTo(_xit.makeEnterAutomatonTransition())), sub_initial))
        if ((self.__superConfiguration is not None) and self.isAccepting()):
            lxit = self.makeLeaveAutomatonTransition()
            supxit = self.__superConfiguration.candidateTransitions(symbol)
            transitions.extend(map((lambda _sx: lxit.chainTo(_sx)), supxit))
    assert (len(frozenset(transitions)) == len(transitions))
    return list(filter(update_filter, filter(match_filter, transitions)))

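The counter-value filtering described above is the distinguishing feature of a finite automaton with counters: a transition is viable only if its update instructions can be applied to the current counter values without violating the occurrence bounds. A rough, self-contained illustration of that idea, not pyxb's actual Transition or UpdateInstruction API:

class CounterCondition:
    def __init__(self, min_count, max_count):
        self.min = min_count
        self.max = max_count   # None means unbounded

def increment_ok(cond, value):
    # Another occurrence is allowed only while below the upper bound.
    return (cond.max is None) or (value < cond.max)

def reset_ok(cond, value):
    # Leaving the counted sub-term requires the lower bound to be met.
    return value >= cond.min

# A particle like <element minOccurs="1" maxOccurs="3"/> maps to a counter with bounds 1..3.
cc = CounterCondition(1, 3)
values = {cc: 3}
assert not increment_ok(cc, values[cc])   # a fourth occurrence is not viable
assert reset_ok(cc, values[cc])           # but the particle is satisfied, so we may move on
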
7afac17488b9b79d32df3c4375d3e3d53a9c2175ab87be4d743ea3e1ddf8dc05
path: pyxb/utils/fac.py
name: isInitial
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def isInitial(self):
    """Return C{True} iff no transitions have ever been made."""
    return (self.__state is None)

9962fc32a2ecfb5780cdf86c1e64b3aca667dd518672ead3d01de7fc5d1808b6
path: pyxb/utils/fac.py
name: isAccepting
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def isAccepting(self):
    """Return C{True} iff the automaton is in an accepting state."""
    if (self.__state is not None):
        if ((self.__subConfiguration is not None) and (not self.__subConfiguration.isAccepting())):
            return False
        if (self.__subAutomata is not None):
            if (not functools.reduce(operator.and_, map((lambda _sa: _sa.nullable), self.__subAutomata), True)):
                return False
        return self.__state.isAccepting(self.__counterValues)
    return self.__automaton.nullable

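The reduce over operator.and_ above is a spelled-out check that every remaining sub-automaton is nullable, i.e. can accept the empty string. The following snippet shows the equivalence on plain data; Auto is a stand-in class, not pyxb's Automaton:

import functools
import operator

class Auto:
    def __init__(self, nullable):
        self.nullable = nullable

subs = [Auto(True), Auto(False)]
reduced = functools.reduce(operator.and_, map(lambda _sa: _sa.nullable, subs), True)
assert reduced == all(_sa.nullable for _sa in subs)    # both are False here
# With no remaining sub-automata the initial value True makes the check vacuously pass.
assert functools.reduce(operator.and_, map(lambda _sa: _sa.nullable, []), True) is True
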
f0aa245da0b0ceb204d45cce72790497a7fd522d36085e4371929b72f5c664ca
path: pyxb/utils/fac.py
name: clone
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def clone(self, clone_map=None):
    """Clone a configuration and its descendants.

    This is used for parallel execution where a configuration has
    multiple candidate transitions and must follow all of them.
    It clones the entire chain of configurations through
    multiple layers.

    @param clone_map: Optional map into which the translation from
    the original configuration object to the corresponding cloned
    configuration object can be reconstructed, e.g. when applying
    a transition that includes automata exits referencing
    superconfigurations from the original configuration."""
    if (clone_map is None):
        clone_map = {}
    root = self
    while (root.__superConfiguration is not None):
        root = root.__superConfiguration
    root = root._clone(clone_map, None)
    return clone_map.get(self)

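Cloning from the root of the super-configuration chain while recording an old-to-new map is a common pattern for copying linked structures: the map lets callers translate references held against the original objects into references into the copy. A minimal stand-alone sketch with a hypothetical Node class, not pyxb's Configuration:

class Node:
    def __init__(self, parent=None):
        self.parent = parent

def clone_chain(node):
    """Deep-copy the parent chain of node; return (copy_of_node, clone_map)."""
    clone_map = {}
    # Collect the chain from the node up to the root.
    chain = []
    cur = node
    while cur is not None:
        chain.append(cur)
        cur = cur.parent
    # Clone top-down so each copy can point at its already-cloned parent.
    for original in reversed(chain):
        clone_map[original] = Node(clone_map.get(original.parent))
    return clone_map[node], clone_map

root = Node()
leaf = Node(parent=Node(parent=root))
leaf_copy, mapping = clone_chain(leaf)
assert leaf_copy is not leaf
assert mapping[root].parent is None
assert leaf_copy.parent is mapping[leaf.parent]
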
9d06d965c348349c495c862eb436fa96d850f69883b7b948c48601e92f75a39d
path: pyxb/utils/fac.py
name: acceptingConfigurations
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def acceptingConfigurations(self):
    """Return the set of configurations that are in an accepting state.

    Note that some of the configurations may be within a
    sub-automaton; their presence in the return value is because
    the root configuration is also accepting."""
    accepting = []
    for cfg in self.__configurations:
        rcfg = cfg
        while (rcfg.superConfiguration is not None):
            rcfg = rcfg.superConfiguration
        if rcfg.isAccepting():
            accepting.append(cfg)
    return accepting

013b9c41b79f56e6e6788bce301ae600a642c6e97f300aacc1dab342aafebecf
path: pyxb/utils/fac.py
name: __get_states
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __get_states(self):
    """The set of L{State}s in the automaton.

    These correspond essentially to marked symbols in the original
    regular expression, or L{element
    declarations<pyxb.xmlschema.structures.ElementDeclaration>} in
    an XML schema.

    @note: These are conceptually a set and are stored that way.
    When an L{Automaton} is constructed the incoming states should
    be passed as a list so the calculated initial transitions are
    executed in a deterministic order."""
    return self.__states

75909c02c8558021ea63577a5ccef52179e4cdca80605e6d5feb6d6211311c14
path: pyxb/utils/fac.py
name: __get_counterConditions
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __get_counterConditions(self):
    """The set of L{CounterCondition}s in the automaton.

    These are marked positions in the regular expression, or
    L{particles<pyxb.xmlschema.structures.Particle>} in an XML
    schema, paired with their occurrence constraints."""
    return self.__counterConditions

7c15074b4cdda42ab74890f457fa1769560c28c7d6d503421671a1891488761a
path: pyxb/utils/fac.py
name: __get_nullable
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __get_nullable(self):
    """C{True} iff the automaton accepts the empty string."""
    return self.__nullable

c9371b3dda652485e134a3bf86338df3e87f6bfd53cde894d2ef9a5e23911a22
path: pyxb/utils/fac.py
name: __get_initialTransitions
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __get_initialTransitions(self):
    """The set of transitions that may be made to enter the automaton.

    These are full transitions, including chains into subautomata
    if an initial state represents a node with sub-automata.

    @note: As with L{State.transitionSet}, the set is represented
    as a list to preserve priority when resolving
    non-deterministic matches."""
    return self.__initialTransitions

1657d777e7c084ffac7ec63e3de04ff9607867bcbdaf462bbe3ae9cc87b8910c
path: pyxb/utils/fac.py
name: __get_containingState
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __get_containingState(self):
    """The L{State} instance for which this is a sub-automaton.

    C{None} if this is not a sub-automaton."""
    return self.__containingState

1bda20e337392ba9f669958e8b5beb8c2aef26e5d973e7ad5495631f140803d2
path: pyxb/utils/fac.py
name: __get_finalStates
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __get_finalStates(self):
    """The set of L{State} members which can terminate a match."""
    return self.__finalStates

3af585adc40ed7e46da709cff60889b92059e56ad5a3037e326b8af8a2af4eca
path: pyxb/utils/fac.py
name: newConfiguration
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def newConfiguration(self):
    """Return a new L{Configuration} instance for this automaton."""
    return Configuration(self)

f544cab8f53806bb27332ade11b6888b635a5f15840e2f3d9035af693ad5914a
path: pyxb/utils/fac.py
name: __init__
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __init__(self, **kw):
    """Create a FAC term-tree node.

    @keyword metadata: Any application-specific metadata retained in
    the term tree for transfer to the resulting automaton."""
    self.__metadata = kw.get('metadata')

8092d92d4958e9d09438fa46bb4ceab9a70f9506f0edabfe855d040fb17a4a1a
path: pyxb/utils/fac.py
name: clone
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def clone(self, *args, **kw):
    """Create a deep copy of the node.

    All term-tree-related attributes and properties are replaced
    with deep clones. Other attributes are preserved.

    @param args: A tuple of arguments to be passed to the instance
    constructor.

    @param kw: A dict of keywords to be passed to the instance
    constructor.

    @note: Subclasses should pre-extend this method to augment the
    C{args} and C{kw} parameters as necessary to match the
    expectations of the C{__init__} method of the class being
    cloned."""
    kw.setdefault('metadata', self.metadata)
    return type(self)(*args, **kw)

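The @note above describes the intended extension pattern: a subclass supplies its own constructor arguments before delegating to the base clone, which fills in the shared metadata keyword. A hedged sketch of such a pre-extension; Node and Counted are illustrative stand-ins, not pyxb's term classes:

class Node:
    """Stand-in for the term-tree base class whose clone is shown above."""
    def __init__(self, **kw):
        self.metadata = kw.get('metadata')

    def clone(self, *args, **kw):
        kw.setdefault('metadata', self.metadata)
        return type(self)(*args, **kw)

class Counted(Node):
    """Hypothetical subclass holding a child term plus occurrence bounds."""
    def __init__(self, child, min_occurs, max_occurs, **kw):
        super().__init__(**kw)
        self.child = child
        self.min_occurs = min_occurs
        self.max_occurs = max_occurs

    def clone(self):
        # Pre-extend: supply the positional arguments __init__ expects,
        # cloning the child so the copy shares no term-tree structure.
        return super().clone(self.child.clone(), self.min_occurs, self.max_occurs)

leaf = Node(metadata='leaf')
dup = Counted(leaf, 1, 2, metadata='particle').clone()
assert dup.metadata == 'particle' and dup.child.metadata == 'leaf'
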
d637e2ea757a0b8d5abff25c80a2508c2dd519ec37b58cd6e55219984c2b6e11
path: pyxb/utils/fac.py
name: __get_metadata
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __get_metadata(self):
    """Application-specific metadata provided during construction."""
    return self.__metadata

8da19e5e451eb5e3df13de1023c322561f7183f92aa6d3f615f7739007f4e0cc
path: pyxb/utils/fac.py
name: __get_first
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __get_first(self):
    """The I{first} set for the node.

    This is the set of positions leading to symbols that can
    appear first in a string matched by an execution starting at
    the node."""
    if (self.__first is None):
        self.__first = frozenset(self._first())
    return self.__first

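The first, last, nullable, and follow accessors in this and the following rows all use the same compute-once idiom: a private cache that starts as None and is filled on first access, with the abstract _first/_last/_nullable/_follow hook doing the real work. A generic sketch of that idiom using a hypothetical class, not the pyxb Term API:

class Memoized:
    def __init__(self, values):
        self.__values = values
        self.__first = None   # cache: None means "not computed yet"

    def _first(self):
        # Potentially expensive derivation; runs at most once per instance.
        return {v[0] for v in self.__values if v}

    def __get_first(self):
        if self.__first is None:
            self.__first = frozenset(self._first())
        return self.__first

    first = property(__get_first)

m = Memoized(['abc', 'axe', ''])
assert m.first == frozenset({'a'})
assert m.first is m.first   # later reads reuse the cached frozenset
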
beba7e72c49e815d9fad27caee4b0cefbb1fbffe8d3b715c8c7c01e8cb205457
path: pyxb/utils/fac.py
name: _first
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def _first(self):
    """Abstract method that defines L{first} for the subclass.

    The return value should be an iterable of tuples of integers
    denoting paths from this node through the term tree to a
    symbol."""
    raise NotImplementedError(('%s.first' % (type(self).__name__,)))

9943f60653537f6447fc3dfe89a5cd4259de7d24af36df57af348ea690512cb1
path: pyxb/utils/fac.py
name: __get_last
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __get_last(self):
    """The I{last} set for the node.

    This is the set of positions leading to symbols that can
    appear last in a string matched by an execution starting at
    the node."""
    if (self.__last is None):
        self.__last = frozenset(self._last())
    return self.__last

9f9fcfa7dd28dad974d46b934144b5d6013f9664050eba3118cfe023a76529d2
path: pyxb/utils/fac.py
name: _last
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def _last(self):
    """Abstract method that defines L{last} for the subclass.

    The return value should be an iterable of tuples of integers
    denoting paths from this node through the term tree to a
    symbol."""
    raise NotImplementedError(('%s.last' % (type(self).__name__,)))

5e942834fec1b56dcc72a45f33ea4f5c0a38ffcba057f9a71f21c94580e9501b
path: pyxb/utils/fac.py
name: __get_nullable
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __get_nullable(self):
    """C{True} iff the empty string is accepted by this node."""
    if (self.__nullable is None):
        self.__nullable = self._nullable()
    return self.__nullable

99e51541304e94340ee377795914e27b68475c529b2818cf9b2fc0cceef9dd89
path: pyxb/utils/fac.py
name: _nullable
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def _nullable(self):
    """Abstract method that defines L{nullable} for the subclass.

    The return value should be C{True} or C{False}."""
    raise NotImplementedError(('%s.nullable' % (type(self).__name__,)))

7cffa8a3fabe2ca24acbd9ddb28c569d4b70cbaa3003c59cd3aa518cc1018480
path: pyxb/utils/fac.py
name: __get_follow
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def __get_follow(self):
    """The I{follow} map for the node."""
    if (self.__follow is None):
        self.__follow = self._follow()
    return self.__follow

251767de9d7070bd3ab60ea4242e1863e2a373913844e8dbe6c07514a8659a0a
path: pyxb/utils/fac.py
name: _follow
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def _follow(self):
    """Abstract method that defines L{follow} for the subclass.

    The return value should be a map from tuples of integers (positions)
    to a list of transitions, where a transition is a position and
    an update instruction."""
    raise NotImplementedError(('%s.follow' % (type(self).__name__,)))

c07ea279ab5b122a34bce1d549570a8422443b489e889ba0881c484a193b41c9
path: pyxb/utils/fac.py
name: reset
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def reset(self):
    """Reset any term-tree state associated with the node.

    Any change to the structure of the term tree in which the node
    appears invalidates memoized first/follow sets and related
    information. This method clears all that data so it can be
    recalculated. It does not clear the L{metadata} link, or any
    existing structural data."""
    self.__first = None
    self.__last = None
    self.__nullable = None
    self.__follow = None
    self.__counterPositions = None

3cc472b8f8cdb9b02c949ec61e993ed0698848def00d4c5d21c660796edf23ee
path: pyxb/utils/fac.py
name: walkTermTree
repository_name: maciekwawro/pyxb
repository_stars: 123
lang: python
body:
def walkTermTree(self, pre, post, arg):
    """Utility function for term tree processing.

    @param pre: a callable that, unless C{None}, is invoked at
    each node C{n} with parameters C{n}, C{pos}, and C{arg}, where
    C{pos} is the tuple of integers identifying the path from the
    node on which this method was invoked to the node being
    processed. The invocation occurs before processing any
    subordinate nodes.

    @param post: as with C{pre} but invocation occurs after
    processing any subordinate nodes.

    @param arg: a value passed to invocations of C{pre} and
    C{post}."""
    self._walkTermTree((), pre, post, arg)

def walkTermTree(self, pre, post, arg): 'Utility function for term tree processing.\n\n @param pre: a callable that, unless C{None}, is invoked at\n each node C{n} with parameters C{n}, C{pos}, and C{arg}, where\n C{pos} is the tuple of integers identifying the path from the\n node at on which this method was invoked to the node being\n processed. The invocation occurs before processing any\n subordinate nodes.\n\n @param post: as with C{pre} but invocation occurs after\n processing any subordinate nodes.\n\n @param arg: a value passed to invocations of C{pre} and\n C{post}.' self._walkTermTree((), pre, post, arg)
def walkTermTree(self, pre, post, arg): 'Utility function for term tree processing.\n\n @param pre: a callable that, unless C{None}, is invoked at\n each node C{n} with parameters C{n}, C{pos}, and C{arg}, where\n C{pos} is the tuple of integers identifying the path from the\n node at on which this method was invoked to the node being\n processed. The invocation occurs before processing any\n subordinate nodes.\n\n @param post: as with C{pre} but invocation occurs after\n processing any subordinate nodes.\n\n @param arg: a value passed to invocations of C{pre} and\n C{post}.' self._walkTermTree((), pre, post, arg)<|docstring|>Utility function for term tree processing. @param pre: a callable that, unless C{None}, is invoked at each node C{n} with parameters C{n}, C{pos}, and C{arg}, where C{pos} is the tuple of integers identifying the path from the node at on which this method was invoked to the node being processed. The invocation occurs before processing any subordinate nodes. @param post: as with C{pre} but invocation occurs after processing any subordinate nodes. @param arg: a value passed to invocations of C{pre} and C{post}.<|endoftext|>
ba268ca0ff7ac2292f356b3af82f96be0e58fbac8682bef6ff270cfd18136eea
def _walkTermTree(self, position, pre, post, arg): 'Abstract method implementing L{walkTermTree} for the subclass.' raise NotImplementedError(('%s.walkTermTree' % (type(self).__name__,)))
Abstract method implementing L{walkTermTree} for the subclass.
pyxb/utils/fac.py
_walkTermTree
maciekwawro/pyxb
123
python
def _walkTermTree(self, position, pre, post, arg): raise NotImplementedError(('%s.walkTermTree' % (type(self).__name__,)))
def _walkTermTree(self, position, pre, post, arg): raise NotImplementedError(('%s.walkTermTree' % (type(self).__name__,)))<|docstring|>Abstract method implementing L{walkTermTree} for the subclass.<|endoftext|>
b20dd98b8db916d92ef00a8851e004613cb6a4cd91f6e1b4dd2990383da211a1
def __get_posNodeMap(self): 'A map from positions to nodes in the term tree.' if (self.__posNodeMap is None): pnm = {} self.walkTermTree((lambda _n, _p, _a: _a.setdefault(_p, _n)), None, pnm) self.__posNodeMap = pnm return self.__posNodeMap
A map from positions to nodes in the term tree.
pyxb/utils/fac.py
__get_posNodeMap
maciekwawro/pyxb
123
python
def __get_posNodeMap(self): if (self.__posNodeMap is None): pnm = {} self.walkTermTree((lambda _n, _p, _a: _a.setdefault(_p, _n)), None, pnm) self.__posNodeMap = pnm return self.__posNodeMap
def __get_posNodeMap(self): if (self.__posNodeMap is None): pnm = {} self.walkTermTree((lambda _n, _p, _a: _a.setdefault(_p, _n)), None, pnm) self.__posNodeMap = pnm return self.__posNodeMap<|docstring|>A map from positions to nodes in the term tree.<|endoftext|>
d37439e7bd8d5bb8d52e160014f3c8ddae0e201403066df509c3bffae36399dc
def __get_nodePosMap(self): 'A map from nodes to their position in the term tree.' if (self.__nodePosMap is None): npm = {} for (p, n) in six.iteritems(self.posNodeMap): npm[n] = p self.__nodePosMap = npm return self.__nodePosMap
A map from nodes to their position in the term tree.
pyxb/utils/fac.py
__get_nodePosMap
maciekwawro/pyxb
123
python
def __get_nodePosMap(self): if (self.__nodePosMap is None): npm = {} for (p, n) in six.iteritems(self.posNodeMap): npm[n] = p self.__nodePosMap = npm return self.__nodePosMap
def __get_nodePosMap(self): if (self.__nodePosMap is None): npm = {} for (p, n) in six.iteritems(self.posNodeMap): npm[n] = p self.__nodePosMap = npm return self.__nodePosMap<|docstring|>A map from nodes to their position in the term tree.<|endoftext|>
e845193334356f3db65a95343550ed355be7ae06d1b6378cb718d11999de9619
@classmethod def _PosConcatPosSet(cls, pos, pos_set): 'Implement definition 11.1 in B{HOV09}.' return frozenset([(pos + _mp) for _mp in pos_set])
Implement definition 11.1 in B{HOV09}.
pyxb/utils/fac.py
_PosConcatPosSet
maciekwawro/pyxb
123
python
@classmethod def _PosConcatPosSet(cls, pos, pos_set): return frozenset([(pos + _mp) for _mp in pos_set])
@classmethod def _PosConcatPosSet(cls, pos, pos_set): return frozenset([(pos + _mp) for _mp in pos_set])<|docstring|>Implement definition 11.1 in B{HOV09}.<|endoftext|>
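A worked micro-example (not part of the repository) of definition 11.1 as implemented above: every position in a position set is prefixed with the position of the enclosing subterm. The positions used here are made up for illustration.

pos = (1,)
pos_set = frozenset([(0,), (2, 0)])
# Prefix each member position with pos, exactly as _PosConcatPosSet does.
print(frozenset(pos + mp for mp in pos_set))
# frozenset({(1, 0), (1, 2, 0)})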
d15d89da44a07cc10dd42b56240bfd02c99a12830344f1ab1ad9db1a89d068e9
@classmethod def _PosConcatUpdateInstruction(cls, pos, psi): 'Implement definition 11.2 in B{HOV09}' rv = {} for (q, v) in six.iteritems(psi): rv[(pos + q)] = v return rv
Implement definition 11.2 in B{HOV09}
pyxb/utils/fac.py
_PosConcatUpdateInstruction
maciekwawro/pyxb
123
python
@classmethod def _PosConcatUpdateInstruction(cls, pos, psi): rv = {} for (q, v) in six.iteritems(psi): rv[(pos + q)] = v return rv
@classmethod def _PosConcatUpdateInstruction(cls, pos, psi): rv = {} for (q, v) in six.iteritems(psi): rv[(pos + q)] = v return rv<|docstring|>Implement definition 11.2 in B{HOV09}<|endoftext|>
bc311784a5fb67d87569a14848048a8ade5f607358fa9a3068669b03a776fefa
@classmethod def _PosConcatTransitionSet(cls, pos, transition_set): 'Implement definition 11.3 in B{HOV09}' ts = [] for (q, psi) in transition_set: ts.append(((pos + q), cls._PosConcatUpdateInstruction(pos, psi))) return ts
Implement definition 11.3 in B{HOV09}
pyxb/utils/fac.py
_PosConcatTransitionSet
maciekwawro/pyxb
123
python
@classmethod def _PosConcatTransitionSet(cls, pos, transition_set): ts = [] for (q, psi) in transition_set: ts.append(((pos + q), cls._PosConcatUpdateInstruction(pos, psi))) return ts
@classmethod def _PosConcatTransitionSet(cls, pos, transition_set): ts = [] for (q, psi) in transition_set: ts.append(((pos + q), cls._PosConcatUpdateInstruction(pos, psi))) return ts<|docstring|>Implement definition 11.3 in B{HOV09}<|endoftext|>
3a8007f84696b5dbbd71626d2b2eed8f911eb8271202e8041123543dedcfd9b2
def __get_counterPositions(self): 'Implement definition 13.1 from B{HOV09}.\n\n The return value is the set of all positions leading to\n L{NumericalConstraint} nodes for which either the minimum\n value is not 1 or the maximum value is not unbounded.' if (self.__counterPositions is None): cpos = [] self.walkTermTree((lambda _n, _p, _a: (isinstance(_n, NumericalConstraint) and ((1 != _n.min) or (_n.max is not None)) and _a.append(_p))), None, cpos) self.__counterPositions = frozenset(cpos) return self.__counterPositions
Implement definition 13.1 from B{HOV09}. The return value is the set of all positions leading to L{NumericalConstraint} nodes for which either the minimum value is not 1 or the maximum value is not unbounded.
pyxb/utils/fac.py
__get_counterPositions
maciekwawro/pyxb
123
python
def __get_counterPositions(self): 'Implement definition 13.1 from B{HOV09}.\n\n The return value is the set of all positions leading to\n L{NumericalConstraint} nodes for which either the minimum\n value is not 1 or the maximum value is not unbounded.' if (self.__counterPositions is None): cpos = [] self.walkTermTree((lambda _n, _p, _a: (isinstance(_n, NumericalConstraint) and ((1 != _n.min) or (_n.max is not None)) and _a.append(_p))), None, cpos) self.__counterPositions = frozenset(cpos) return self.__counterPositions
def __get_counterPositions(self): 'Implement definition 13.1 from B{HOV09}.\n\n The return value is the set of all positions leading to\n L{NumericalConstraint} nodes for which either the minimum\n value is not 1 or the maximum value is not unbounded.' if (self.__counterPositions is None): cpos = [] self.walkTermTree((lambda _n, _p, _a: (isinstance(_n, NumericalConstraint) and ((1 != _n.min) or (_n.max is not None)) and _a.append(_p))), None, cpos) self.__counterPositions = frozenset(cpos) return self.__counterPositions<|docstring|>Implement definition 13.1 from B{HOV09}. The return value is the set of all positions leading to L{NumericalConstraint} nodes for which either the minimum value is not 1 or the maximum value is not unbounded.<|endoftext|>
bc385d77a077cf97fd74b5a3a22198f5ec15b28d19b06f46840e578b18f28364
def counterSubPositions(self, pos): 'Implement definition 13.2 from B{HOV09}.\n\n This is the subset of L{counterPositions} that occur along the\n path to C{pos}.' rv = set() for cpos in self.counterPositions: if (cpos == pos[:len(cpos)]): rv.add(cpos) return frozenset(rv)
Implement definition 13.2 from B{HOV09}. This is the subset of L{counterPositions} that occur along the path to C{pos}.
pyxb/utils/fac.py
counterSubPositions
maciekwawro/pyxb
123
python
def counterSubPositions(self, pos): 'Implement definition 13.2 from B{HOV09}.\n\n This is the subset of L{counterPositions} that occur along the\n path to C{pos}.' rv = set() for cpos in self.counterPositions: if (cpos == pos[:len(cpos)]): rv.add(cpos) return frozenset(rv)
def counterSubPositions(self, pos): 'Implement definition 13.2 from B{HOV09}.\n\n This is the subset of L{counterPositions} that occur along the\n path to C{pos}.' rv = set() for cpos in self.counterPositions: if (cpos == pos[:len(cpos)]): rv.add(cpos) return frozenset(rv)<|docstring|>Implement definition 13.2 from B{HOV09}. This is the subset of L{counterPositions} that occur along the path to C{pos}.<|endoftext|>
e8a2cdb334f97767401d0bb422eab07d802024d0437ef83a2a5c803c48a2eaad
def _facToString(self): 'Obtain a description of the FAC in text format.\n\n This is a diagnostic tool, returning first, last, and follow\n maps using positions.' rv = [] rv.append(('r\t= %s' % (str(self),))) states = list(six.iterkeys(self.follow)) rv.append(('sym(r)\t= %s' % ' '.join(map(str, map(self.posNodeMap.get, states))))) rv.append(('first(r)\t= %s' % ' '.join(map(str, self.first)))) rv.append(('last(r)\t= %s' % ' '.join(map(str, self.last)))) rv.append(('C\t= %s' % ' '.join(map(str, self.counterPositions)))) for pos in self.first: rv.append(('qI(%s) -> %s' % (self.posNodeMap[pos].metadata, str(pos)))) for spos in states: for (dpos, transition_set) in self.follow[spos]: dst = self.posNodeMap[dpos] uv = [] for (c, u) in six.iteritems(transition_set): uv.append(('%s %s' % ((((u == self.INCREMENT) and 'inc') or 'rst'), str(c)))) rv.append(('%s -%s-> %s ; %s' % (str(spos), dst.metadata, str(dpos), ' ; '.join(uv)))) return '\n'.join(rv)
Obtain a description of the FAC in text format. This is a diagnostic tool, returning first, last, and follow maps using positions.
pyxb/utils/fac.py
_facToString
maciekwawro/pyxb
123
python
def _facToString(self): 'Obtain a description of the FAC in text format.\n\n This is a diagnostic tool, returning first, last, and follow\n maps using positions.' rv = [] rv.append(('r\t= %s' % (str(self),))) states = list(six.iterkeys(self.follow)) rv.append(('sym(r)\t= %s' % ' '.join(map(str, map(self.posNodeMap.get, states))))) rv.append(('first(r)\t= %s' % ' '.join(map(str, self.first)))) rv.append(('last(r)\t= %s' % ' '.join(map(str, self.last)))) rv.append(('C\t= %s' % ' '.join(map(str, self.counterPositions)))) for pos in self.first: rv.append(('qI(%s) -> %s' % (self.posNodeMap[pos].metadata, str(pos)))) for spos in states: for (dpos, transition_set) in self.follow[spos]: dst = self.posNodeMap[dpos] uv = [] for (c, u) in six.iteritems(transition_set): uv.append(('%s %s' % ((((u == self.INCREMENT) and 'inc') or 'rst'), str(c)))) rv.append(('%s -%s-> %s ; %s' % (str(spos), dst.metadata, str(dpos), ' ; '.join(uv)))) return '\n'.join(rv)
def _facToString(self): 'Obtain a description of the FAC in text format.\n\n This is a diagnostic tool, returning first, last, and follow\n maps using positions.' rv = [] rv.append(('r\t= %s' % (str(self),))) states = list(six.iterkeys(self.follow)) rv.append(('sym(r)\t= %s' % ' '.join(map(str, map(self.posNodeMap.get, states))))) rv.append(('first(r)\t= %s' % ' '.join(map(str, self.first)))) rv.append(('last(r)\t= %s' % ' '.join(map(str, self.last)))) rv.append(('C\t= %s' % ' '.join(map(str, self.counterPositions)))) for pos in self.first: rv.append(('qI(%s) -> %s' % (self.posNodeMap[pos].metadata, str(pos)))) for spos in states: for (dpos, transition_set) in self.follow[spos]: dst = self.posNodeMap[dpos] uv = [] for (c, u) in six.iteritems(transition_set): uv.append(('%s %s' % ((((u == self.INCREMENT) and 'inc') or 'rst'), str(c)))) rv.append(('%s -%s-> %s ; %s' % (str(spos), dst.metadata, str(dpos), ' ; '.join(uv)))) return '\n'.join(rv)<|docstring|>Obtain a description of the FAC in text format. This is a diagnostic tool, returning first, last, and follow maps using positions.<|endoftext|>
b2c8fcb4f5d52aadd86b4a0226e484f574004d4efeeec72d4687c51d356a1f3f
def __get_terms(self): 'The set of subordinate terms of the current node.' return self.__terms
The set of subordinate terms of the current node.
pyxb/utils/fac.py
__get_terms
maciekwawro/pyxb
123
python
def __get_terms(self): return self.__terms
def __get_terms(self): return self.__terms<|docstring|>The set of subordinate terms of the current node.<|endoftext|>
a9e2b1ee26c88dd8f3432c4db3b2ff30c056b2f3e3ab2bb707fa9dfb787594d0
def __init__(self, *terms, **kw): 'Term that collects an ordered sequence of terms.\n\n The terms are provided as arguments. All must be instances of\n a subclass of L{Node}.' super(MultiTermNode, self).__init__(**kw) self.__terms = terms
Term that collects an ordered sequence of terms. The terms are provided as arguments. All must be instances of a subclass of L{Node}.
pyxb/utils/fac.py
__init__
maciekwawro/pyxb
123
python
def __init__(self, *terms, **kw): 'Term that collects an ordered sequence of terms.\n\n The terms are provided as arguments. All must be instances of\n a subclass of L{Node}.' super(MultiTermNode, self).__init__(**kw) self.__terms = terms
def __init__(self, *terms, **kw): 'Term that collects an ordered sequence of terms.\n\n The terms are provided as arguments. All must be instances of\n a subclass of L{Node}.' super(MultiTermNode, self).__init__(**kw) self.__terms = terms<|docstring|>Term that collects an ordered sequence of terms. The terms are provided as arguments. All must be instances of a subclass of L{Node}.<|endoftext|>
c87dab85330baec32a9acaa57aea9c6ebd868764ae27dcb74bebac3312cec83a
def __init__(self, term, min=0, max=1, **kw): 'Term with a numerical constraint.\n\n @param term: A term, the number of appearances of which is\n constrained in this term.\n @type term: L{Node}\n\n @keyword min: The minimum number of occurrences of C{term}.\n The value must be non-negative.\n\n @keyword max: The maximum number of occurrences of C{term}.\n The value must be positive (in which case it must also be no\n smaller than C{min}), or C{None} to indicate an unbounded\n number of occurrences.' super(NumericalConstraint, self).__init__(**kw) self.__term = term self.__min = min self.__max = max
Term with a numerical constraint. @param term: A term, the number of appearances of which is constrained in this term. @type term: L{Node} @keyword min: The minimum number of occurrences of C{term}. The value must be non-negative. @keyword max: The maximum number of occurrences of C{term}. The value must be positive (in which case it must also be no smaller than C{min}), or C{None} to indicate an unbounded number of occurrences.
pyxb/utils/fac.py
__init__
maciekwawro/pyxb
123
python
def __init__(self, term, min=0, max=1, **kw): 'Term with a numerical constraint.\n\n @param term: A term, the number of appearances of which is\n constrained in this term.\n @type term: L{Node}\n\n @keyword min: The minimum number of occurrences of C{term}.\n The value must be non-negative.\n\n @keyword max: The maximum number of occurrences of C{term}.\n The value must be positive (in which case it must also be no\n smaller than C{min}), or C{None} to indicate an unbounded\n number of occurrences.' super(NumericalConstraint, self).__init__(**kw) self.__term = term self.__min = min self.__max = max
def __init__(self, term, min=0, max=1, **kw): 'Term with a numerical constraint.\n\n @param term: A term, the number of appearances of which is\n constrained in this term.\n @type term: L{Node}\n\n @keyword min: The minimum number of occurrences of C{term}.\n The value must be non-negative.\n\n @keyword max: The maximum number of occurrences of C{term}.\n The value must be positive (in which case it must also be no\n smaller than C{min}), or C{None} to indicate an unbounded\n number of occurrences.' super(NumericalConstraint, self).__init__(**kw) self.__term = term self.__min = min self.__max = max<|docstring|>Term with a numerical constraint. @param term: A term, the number of appearances of which is constrained in this term. @type term: L{Node} @keyword min: The minimum number of occurrences of C{term}. The value must be non-negative. @keyword max: The maximum number of occurrences of C{term}. The value must be positive (in which case it must also be no smaller than C{min}), or C{None} to indicate an unbounded number of occurrences.<|endoftext|>
e416a5de2504575028bff40a19d87a0dfde12867839db9a87a119ae86372dab1
def __init__(self, *terms, **kw): 'Term that selects one of a set of terms.\n\n The terms are provided as arguments. All must be instances of\n a subclass of L{Node}.' super(Choice, self).__init__(*terms, **kw)
Term that selects one of a set of terms. The terms are provided as arguments. All must be instances of a subclass of L{Node}.
pyxb/utils/fac.py
__init__
maciekwawro/pyxb
123
python
def __init__(self, *terms, **kw): 'Term that selects one of a set of terms.\n\n The terms are provided as arguments. All must be instances of\n a subclass of L{Node}.' super(Choice, self).__init__(*terms, **kw)
def __init__(self, *terms, **kw): 'Term that selects one of a set of terms.\n\n The terms are provided as arguments. All must be instances of\n a subclass of L{Node}.' super(Choice, self).__init__(*terms, **kw)<|docstring|>Term that selects one of a set of terms. The terms are provided as arguments. All must be instances of a subclass of L{Node}.<|endoftext|>
564aed2fa5393bf74ea74e0965693530460da0d261b5bcc523ecf4297f28a383
def __init__(self, *terms, **kw): 'Term that collects an ordered sequence of terms.\n\n The terms are provided as arguments. All must be instances of\n a subclass of L{Node}.' super(Sequence, self).__init__(*terms, **kw)
Term that collects an ordered sequence of terms. The terms are provided as arguments. All must be instances of a subclass of L{Node}.
pyxb/utils/fac.py
__init__
maciekwawro/pyxb
123
python
def __init__(self, *terms, **kw): 'Term that collects an ordered sequence of terms.\n\n The terms are provided as arguments. All must be instances of\n a subclass of L{Node}.' super(Sequence, self).__init__(*terms, **kw)
def __init__(self, *terms, **kw): 'Term that collects an ordered sequence of terms.\n\n The terms are provided as arguments. All must be instances of\n a subclass of L{Node}.' super(Sequence, self).__init__(*terms, **kw)<|docstring|>Term that collects an ordered sequence of terms. The terms are provided as arguments. All must be instances of a subclass of L{Node}.<|endoftext|>
4078fbd9e5a83a784cc0e7b86cd30efae34a589f283a8c8748afabcf7a1c5026
def __init__(self, *terms, **kw): 'Term that collects an unordered sequence of terms.\n\n The terms are provided as arguments. All must be instances of\n a subclass of L{Node}.' super(All, self).__init__(*terms, **kw)
Term that collects an unordered sequence of terms. The terms are provided as arguments. All must be instances of a subclass of L{Node}.
pyxb/utils/fac.py
__init__
maciekwawro/pyxb
123
python
def __init__(self, *terms, **kw): 'Term that collects an unordered sequence of terms.\n\n The terms are provided as arguments. All must be instances of\n a subclass of L{Node}.' super(All, self).__init__(*terms, **kw)
def __init__(self, *terms, **kw): 'Term that collects an unordered sequence of terms.\n\n The terms are provided as arguments. All must be instances of\n a subclass of L{Node}.' super(All, self).__init__(*terms, **kw)<|docstring|>Term that collects an unordered sequence of terms. The terms are provided as arguments. All must be instances of a subclass of L{Node}.<|endoftext|>
4318b6f156eb3bc393528a0aabdec8ce09de7ac6f0a94e83e112377de18146f9
@classmethod def CreateTermTree(cls, *terms): 'Create a term tree that implements unordered catenation of\n the terms.\n\n This expansion results in a standard choice/sequence term\n tree, at the cost of quadratic state expansion because terms\n are L{cloned<Node.clone>} as required to satisfy the tree\n requirements of the term tree.\n\n @param terms: The tuple of terms that are elements of an\n accepted sequence.\n\n @return: A term tree comprising a choice between sequences\n that connect each term to the unordered catenation of the\n remaining terms.' if (1 == len(terms)): return terms[0] disjuncts = [] for i in xrange(len(terms)): n = terms[i] rem = map((lambda _s: _s.clone()), (terms[:i] + terms[(i + 1):])) disjuncts.append(Sequence(n, cls.CreateTermTree(*rem))) return Choice(*disjuncts)
Create a term tree that implements unordered catenation of the terms. This expansion results in a standard choice/sequence term tree, at the cost of quadratic state expansion because terms are L{cloned<Node.clone>} as required to satisfy the tree requirements of the term tree. @param terms: The tuple of terms that are elements of an accepted sequence. @return: A term tree comprising a choice between sequences that connect each term to the unordered catenation of the remaining terms.
pyxb/utils/fac.py
CreateTermTree
maciekwawro/pyxb
123
python
@classmethod def CreateTermTree(cls, *terms): 'Create a term tree that implements unordered catenation of\n the terms.\n\n This expansion results in a standard choice/sequence term\n tree, at the cost of quadratic state expansion because terms\n are L{cloned<Node.clone>} as required to satisfy the tree\n requirements of the term tree.\n\n @param terms: The tuple of terms that are elements of an\n accepted sequence.\n\n @return: A term tree comprising a choice between sequences\n that connect each term to the unordered catenation of the\n remaining terms.' if (1 == len(terms)): return terms[0] disjuncts = [] for i in xrange(len(terms)): n = terms[i] rem = map((lambda _s: _s.clone()), (terms[:i] + terms[(i + 1):])) disjuncts.append(Sequence(n, cls.CreateTermTree(*rem))) return Choice(*disjuncts)
@classmethod def CreateTermTree(cls, *terms): 'Create a term tree that implements unordered catenation of\n the terms.\n\n This expansion results in a standard choice/sequence term\n tree, at the cost of quadratic state expansion because terms\n are L{cloned<Node.clone>} as required to satisfy the tree\n requirements of the term tree.\n\n @param terms: The tuple of terms that are elements of an\n accepted sequence.\n\n @return: A term tree comprising a choice between sequences\n that connect each term to the unordered catenation of the\n remaining terms.' if (1 == len(terms)): return terms[0] disjuncts = [] for i in xrange(len(terms)): n = terms[i] rem = map((lambda _s: _s.clone()), (terms[:i] + terms[(i + 1):])) disjuncts.append(Sequence(n, cls.CreateTermTree(*rem))) return Choice(*disjuncts)<|docstring|>Create a term tree that implements unordered catenation of the terms. This expansion results in a standard choice/sequence term tree, at the cost of quadratic state expansion because terms are L{cloned<Node.clone>} as required to satisfy the tree requirements of the term tree. @param terms: The tuple of terms that are elements of an accepted sequence. @return: A term tree comprising a choice between sequences that connect each term to the unordered catenation of the remaining terms.<|endoftext|>
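A minimal standalone sketch (it does not use pyxb's Node, Sequence, or Choice classes) of the expansion CreateTermTree performs: each disjunct pairs one term with the unordered catenation of the remaining terms, which is why the result grows quadratically. Strings stand in for terms and tagged tuples stand in for Sequence/Choice nodes.

def unordered_expansion(terms):
    # Base case: a single term needs no expansion.
    if len(terms) == 1:
        return terms[0]
    disjuncts = []
    for i, head in enumerate(terms):
        rest = terms[:i] + terms[i + 1:]
        # ('seq', ...) plays the role of Sequence, ('choice', ...) of Choice.
        disjuncts.append(('seq', head, unordered_expansion(rest)))
    return ('choice',) + tuple(disjuncts)

print(unordered_expansion(('a', 'b', 'c')))
# ('choice',
#  ('seq', 'a', ('choice', ('seq', 'b', 'c'), ('seq', 'c', 'b'))),
#  ('seq', 'b', ('choice', ('seq', 'a', 'c'), ('seq', 'c', 'a'))),
#  ('seq', 'c', ('choice', ('seq', 'a', 'b'), ('seq', 'b', 'a'))))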
a21857d10e25ed4da4bb7897d78ff550d0a8f2052158d32ac7a18fd270ae62ee
def __init__(self, app: NDNApp, prefix: NonStrictName, repo_name: NonStrictName): '\n This client deletes data packets from the remote repo.\n\n :param app: NDNApp.\n :param repo_name: NonStrictName. Routable name to remote repo.\n ' self.app = app self.prefix = prefix self.repo_name = repo_name self.pb = PubSub(self.app, self.prefix)
This client deletes data packets from the remote repo. :param app: NDNApp. :param repo_name: NonStrictName. Routable name to remote repo.
ndn_python_repo/clients/delete.py
__init__
satyaprakash-1729/ndn-python-repo
0
python
def __init__(self, app: NDNApp, prefix: NonStrictName, repo_name: NonStrictName): '\n This client deletes data packets from the remote repo.\n\n :param app: NDNApp.\n :param repo_name: NonStrictName. Routable name to remote repo.\n ' self.app = app self.prefix = prefix self.repo_name = repo_name self.pb = PubSub(self.app, self.prefix)
def __init__(self, app: NDNApp, prefix: NonStrictName, repo_name: NonStrictName): '\n This client deletes data packets from the remote repo.\n\n :param app: NDNApp.\n :param repo_name: NonStrictName. Routable name to remote repo.\n ' self.app = app self.prefix = prefix self.repo_name = repo_name self.pb = PubSub(self.app, self.prefix)<|docstring|>This client deletes data packets from the remote repo. :param app: NDNApp. :param repo_name: NonStrictName. Routable name to remote repo.<|endoftext|>
3be556d0d053b6403fc06c63f649e0ce85c0ee34ab1f6bc2c7674a159f1f7745
async def delete_file(self, prefix: NonStrictName, start_block_id: int=None, end_block_id: int=None) -> int: '\n Delete from repo packets between "<name_at_repo>/<start_block_id>" and "<name_at_repo>/<end_block_id>" inclusively.\n\n :param prefix: NonStrictName. The name of the file stored in the remote repo.\n :param start_block_id: int. Default value is 0.\n :param end_block_id: int. If not specified, repo will attempt to delete all data packets with segment number starting from `start_block_id` continously.\n :return: Number of deleted packets.\n ' cmd_param = RepoCommandParameter() cmd_param.name = prefix cmd_param.start_block_id = start_block_id cmd_param.end_block_id = end_block_id cmd_param.register_prefix = prefix process_id = gen_nonce() cmd_param.process_id = process_id cmd_param_bytes = cmd_param.encode() (await self.pb.wait_for_ready()) self.pb.publish((self.repo_name + ['delete']), cmd_param_bytes) return (await self._wait_for_finish(process_id))
Delete from repo packets between "<name_at_repo>/<start_block_id>" and "<name_at_repo>/<end_block_id>" inclusively. :param prefix: NonStrictName. The name of the file stored in the remote repo. :param start_block_id: int. Default value is 0. :param end_block_id: int. If not specified, repo will attempt to delete all data packets with segment number starting from `start_block_id` continuously. :return: Number of deleted packets.
ndn_python_repo/clients/delete.py
delete_file
satyaprakash-1729/ndn-python-repo
0
python
async def delete_file(self, prefix: NonStrictName, start_block_id: int=None, end_block_id: int=None) -> int: '\n Delete from repo packets between "<name_at_repo>/<start_block_id>" and "<name_at_repo>/<end_block_id>" inclusively.\n\n :param prefix: NonStrictName. The name of the file stored in the remote repo.\n :param start_block_id: int. Default value is 0.\n :param end_block_id: int. If not specified, repo will attempt to delete all data packets with segment number starting from `start_block_id` continously.\n :return: Number of deleted packets.\n ' cmd_param = RepoCommandParameter() cmd_param.name = prefix cmd_param.start_block_id = start_block_id cmd_param.end_block_id = end_block_id cmd_param.register_prefix = prefix process_id = gen_nonce() cmd_param.process_id = process_id cmd_param_bytes = cmd_param.encode() (await self.pb.wait_for_ready()) self.pb.publish((self.repo_name + ['delete']), cmd_param_bytes) return (await self._wait_for_finish(process_id))
async def delete_file(self, prefix: NonStrictName, start_block_id: int=None, end_block_id: int=None) -> int: '\n Delete from repo packets between "<name_at_repo>/<start_block_id>" and "<name_at_repo>/<end_block_id>" inclusively.\n\n :param prefix: NonStrictName. The name of the file stored in the remote repo.\n :param start_block_id: int. Default value is 0.\n :param end_block_id: int. If not specified, repo will attempt to delete all data packets with segment number starting from `start_block_id` continously.\n :return: Number of deleted packets.\n ' cmd_param = RepoCommandParameter() cmd_param.name = prefix cmd_param.start_block_id = start_block_id cmd_param.end_block_id = end_block_id cmd_param.register_prefix = prefix process_id = gen_nonce() cmd_param.process_id = process_id cmd_param_bytes = cmd_param.encode() (await self.pb.wait_for_ready()) self.pb.publish((self.repo_name + ['delete']), cmd_param_bytes) return (await self._wait_for_finish(process_id))<|docstring|>Delete from repo packets between "<name_at_repo>/<start_block_id>" and "<name_at_repo>/<end_block_id>" inclusively. :param prefix: NonStrictName. The name of the file stored in the remote repo. :param start_block_id: int. Default value is 0. :param end_block_id: int. If not specified, repo will attempt to delete all data packets with segment number starting from `start_block_id` continously. :return: Number of deleted packets.<|endoftext|>
e38b6fb700f5417e03d2ff305db5dca2a70aa2e7db91ee2b56804b95d2ee3be8
async def _wait_for_finish(self, process_id: int): '\n Send delete check interest to wait until delete process completes\n\n :param process_id: int. The process id to check for delete process\n :return: Number of deleted packets.\n ' checker = CommandChecker(self.app) n_retries = 3 while (n_retries > 0): response = (await checker.check_delete(self.repo_name, process_id)) if (response is None): logging.info(f'Response code is None') (await aio.sleep(1)) elif (response.status_code == 404): n_retries -= 1 logging.info(f'Response code is {response.status_code}') (await aio.sleep(1)) elif (response.status_code == 300): logging.info(f'Response code is {response.status_code}') (await aio.sleep(1)) elif (response.status_code == 200): logging.info('Delete process {} status: {}, delete_num: {}'.format(process_id, response.status_code, response.delete_num)) return response.delete_num else: assert False
Send a delete check interest and wait until the delete process completes :param process_id: int. The process id of the delete process to check :return: Number of deleted packets.
ndn_python_repo/clients/delete.py
_wait_for_finish
satyaprakash-1729/ndn-python-repo
0
python
async def _wait_for_finish(self, process_id: int): '\n Send delete check interest to wait until delete process completes\n\n :param process_id: int. The process id to check for delete process\n :return: Number of deleted packets.\n ' checker = CommandChecker(self.app) n_retries = 3 while (n_retries > 0): response = (await checker.check_delete(self.repo_name, process_id)) if (response is None): logging.info(f'Response code is None') (await aio.sleep(1)) elif (response.status_code == 404): n_retries -= 1 logging.info(f'Response code is {response.status_code}') (await aio.sleep(1)) elif (response.status_code == 300): logging.info(f'Response code is {response.status_code}') (await aio.sleep(1)) elif (response.status_code == 200): logging.info('Delete process {} status: {}, delete_num: {}'.format(process_id, response.status_code, response.delete_num)) return response.delete_num else: assert False
async def _wait_for_finish(self, process_id: int): '\n Send delete check interest to wait until delete process completes\n\n :param process_id: int. The process id to check for delete process\n :return: Number of deleted packets.\n ' checker = CommandChecker(self.app) n_retries = 3 while (n_retries > 0): response = (await checker.check_delete(self.repo_name, process_id)) if (response is None): logging.info(f'Response code is None') (await aio.sleep(1)) elif (response.status_code == 404): n_retries -= 1 logging.info(f'Response code is {response.status_code}') (await aio.sleep(1)) elif (response.status_code == 300): logging.info(f'Response code is {response.status_code}') (await aio.sleep(1)) elif (response.status_code == 200): logging.info('Delete process {} status: {}, delete_num: {}'.format(process_id, response.status_code, response.delete_num)) return response.delete_num else: assert False<|docstring|>Send delete check interest to wait until delete process completes :param process_id: int. The process id to check for delete process :return: Number of deleted packets.<|endoftext|>
a8990def49e31d12eba9007d8d156ad187567d81d7ab9af5826273daa23500b7
def annotate_bed(bedfile, fasta_file, stop_required=True, min_len=50, longest=False, protein_fasta=None, min_aln_score_thr=(- 1)): ' Given a BED file and corresponding fasta file,\n returns an iterator of bedline objects with ORF annotations.\n Args:\n bedfile (string): path to the bedfile\n fasta_files (string): path to the fasta file\n protein_fasta (str): Fasta file of know proteins against which ORFs are aligned\n min_aln_score_thr (int): Minimum aligmement score to consider hits againts protein_fasta\n Returns:\n bedline \n ' seq_dict = utils.load_fasta(fasta_file) if (protein_fasta is not None): protein_seq_dict = utils.load_fasta(protein_fasta) with open(bedfile, 'r') as bed: for line in bed: bl = bedline(line.split('\t')) seq = sequence(utils.get_sequence_from_seq_dict(bl, seq_dict), orfs_req_stop=stop_required, min_orf_len=min_len) if longest: new_orf = seq.longest_orf else: new_orf = seq.first_orf if (new_orf is not None): orf_start = bl.tx2genome(new_orf.start, stranded=True) orf_end = (bl.tx2genome((new_orf.stop - 1), stranded=True) + 1) if (protein_fasta is not None): orf_id = new_orf.find_orf_best_match(protein_seq_dict, min_score_thr=min_aln_score_thr) bl.name = ((bl.name + '#') + orf_id) else: orf_start = bl.end orf_end = bl.end bl.cdsStart = orf_start bl.cdsEnd = orf_end (yield bl)
Given a BED file and corresponding fasta file, returns an iterator of bedline objects with ORF annotations. Args: bedfile (string): path to the bedfile fasta_file (string): path to the fasta file protein_fasta (str): Fasta file of known proteins against which ORFs are aligned min_aln_score_thr (int): Minimum alignment score to consider hits against protein_fasta Returns: bedline
orf_annotate/annotate_bed.py
annotate_bed
tleonardi/ORF_annotate
0
python
def annotate_bed(bedfile, fasta_file, stop_required=True, min_len=50, longest=False, protein_fasta=None, min_aln_score_thr=(- 1)): ' Given a BED file and corresponding fasta file,\n returns an iterator of bedline objects with ORF annotations.\n Args:\n bedfile (string): path to the bedfile\n fasta_files (string): path to the fasta file\n protein_fasta (str): Fasta file of know proteins against which ORFs are aligned\n min_aln_score_thr (int): Minimum aligmement score to consider hits againts protein_fasta\n Returns:\n bedline \n ' seq_dict = utils.load_fasta(fasta_file) if (protein_fasta is not None): protein_seq_dict = utils.load_fasta(protein_fasta) with open(bedfile, 'r') as bed: for line in bed: bl = bedline(line.split('\t')) seq = sequence(utils.get_sequence_from_seq_dict(bl, seq_dict), orfs_req_stop=stop_required, min_orf_len=min_len) if longest: new_orf = seq.longest_orf else: new_orf = seq.first_orf if (new_orf is not None): orf_start = bl.tx2genome(new_orf.start, stranded=True) orf_end = (bl.tx2genome((new_orf.stop - 1), stranded=True) + 1) if (protein_fasta is not None): orf_id = new_orf.find_orf_best_match(protein_seq_dict, min_score_thr=min_aln_score_thr) bl.name = ((bl.name + '#') + orf_id) else: orf_start = bl.end orf_end = bl.end bl.cdsStart = orf_start bl.cdsEnd = orf_end (yield bl)
def annotate_bed(bedfile, fasta_file, stop_required=True, min_len=50, longest=False, protein_fasta=None, min_aln_score_thr=(- 1)): ' Given a BED file and corresponding fasta file,\n returns an iterator of bedline objects with ORF annotations.\n Args:\n bedfile (string): path to the bedfile\n fasta_files (string): path to the fasta file\n protein_fasta (str): Fasta file of know proteins against which ORFs are aligned\n min_aln_score_thr (int): Minimum aligmement score to consider hits againts protein_fasta\n Returns:\n bedline \n ' seq_dict = utils.load_fasta(fasta_file) if (protein_fasta is not None): protein_seq_dict = utils.load_fasta(protein_fasta) with open(bedfile, 'r') as bed: for line in bed: bl = bedline(line.split('\t')) seq = sequence(utils.get_sequence_from_seq_dict(bl, seq_dict), orfs_req_stop=stop_required, min_orf_len=min_len) if longest: new_orf = seq.longest_orf else: new_orf = seq.first_orf if (new_orf is not None): orf_start = bl.tx2genome(new_orf.start, stranded=True) orf_end = (bl.tx2genome((new_orf.stop - 1), stranded=True) + 1) if (protein_fasta is not None): orf_id = new_orf.find_orf_best_match(protein_seq_dict, min_score_thr=min_aln_score_thr) bl.name = ((bl.name + '#') + orf_id) else: orf_start = bl.end orf_end = bl.end bl.cdsStart = orf_start bl.cdsEnd = orf_end (yield bl)<|docstring|>Given a BED file and corresponding fasta file, returns an iterator of bedline objects with ORF annotations. Args: bedfile (string): path to the bedfile fasta_files (string): path to the fasta file protein_fasta (str): Fasta file of know proteins against which ORFs are aligned min_aln_score_thr (int): Minimum aligmement score to consider hits againts protein_fasta Returns: bedline<|endoftext|>
6ce10d35a95113bdee731e2dad095b8c93168037b4bca32c363751253b5b6bcb
def method_handler(request, ctx, store): '\n requests - body requests:\n * account - строка, опционально, может быть пустым\n * login - строка, обязательно, может быть пустым\n * method - строка, обязательно, может быть пустым\n * token - строка, обязательно, может быть пустым\n * arguments - словарь (объект в терминах json), обязательно, может быть пустым\n ' method_handler = {'online_score': OnlineScoreRequestHandler, 'clients_interests': ClientsInterestsRequestHandler} print('Store method handler', (store == None)) method_request = MethodRequest(request['body']) if method_request.is_valid(): if check_auth(method_request): handler = method_handler[method_request.method]() response = handler.get_response(method_request, store, ctx) if ('error' not in response): return (response, OK) else: return (ERRORS.get(response['error'], 'Unknown Error'), response['error']) else: return (ERRORS[FORBIDDEN], FORBIDDEN) else: return (ERRORS[INVALID_REQUEST], INVALID_REQUEST)
requests - body requests: * account - string, optional, may be empty * login - string, required, may be empty * method - string, required, may be empty * token - string, required, may be empty * arguments - dictionary (an object in json terms), required, may be empty
api.py
method_handler
zkid18/scoring_api
0
python
def method_handler(request, ctx, store): '\n requests - body requests:\n * account - строка, опционально, может быть пустым\n * login - строка, обязательно, может быть пустым\n * method - строка, обязательно, может быть пустым\n * token - строка, обязательно, может быть пустым\n * arguments - словарь (объект в терминах json), обязательно, может быть пустым\n ' method_handler = {'online_score': OnlineScoreRequestHandler, 'clients_interests': ClientsInterestsRequestHandler} print('Store method handler', (store == None)) method_request = MethodRequest(request['body']) if method_request.is_valid(): if check_auth(method_request): handler = method_handler[method_request.method]() response = handler.get_response(method_request, store, ctx) if ('error' not in response): return (response, OK) else: return (ERRORS.get(response['error'], 'Unknown Error'), response['error']) else: return (ERRORS[FORBIDDEN], FORBIDDEN) else: return (ERRORS[INVALID_REQUEST], INVALID_REQUEST)
def method_handler(request, ctx, store): '\n requests - body requests:\n * account - строка, опционально, может быть пустым\n * login - строка, обязательно, может быть пустым\n * method - строка, обязательно, может быть пустым\n * token - строка, обязательно, может быть пустым\n * arguments - словарь (объект в терминах json), обязательно, может быть пустым\n ' method_handler = {'online_score': OnlineScoreRequestHandler, 'clients_interests': ClientsInterestsRequestHandler} print('Store method handler', (store == None)) method_request = MethodRequest(request['body']) if method_request.is_valid(): if check_auth(method_request): handler = method_handler[method_request.method]() response = handler.get_response(method_request, store, ctx) if ('error' not in response): return (response, OK) else: return (ERRORS.get(response['error'], 'Unknown Error'), response['error']) else: return (ERRORS[FORBIDDEN], FORBIDDEN) else: return (ERRORS[INVALID_REQUEST], INVALID_REQUEST)<|docstring|>requests - body requests: * account - строка, опционально, может быть пустым * login - строка, обязательно, может быть пустым * method - строка, обязательно, может быть пустым * token - строка, обязательно, может быть пустым * arguments - словарь (объект в терминах json), обязательно, может быть пустым<|endoftext|>
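A hypothetical request body matching the fields listed in the docstring above (account, login, method, token, arguments); the concrete values, including the token placeholder, are made up for illustration and would normally have to satisfy check_auth and the field validators.

sample_request = {
    'body': {
        'account': 'horns&hoofs',        # optional, may be empty
        'login': 'h&f',                  # required, may be empty
        'method': 'online_score',        # one of the supported handlers
        'token': '<digest produced by the auth scheme>',  # placeholder
        'arguments': {'phone': '79175002040', 'email': 'user@example.com'},
    }
}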
cdf8606c88a4fcc55ec7e3e1c3df1b008e2d90476f8f724b797fa6799aaaafde
def __init__(self, required, nullable): '\n The __init__ method takes the following parameters:\n 1. reuqired - Boolean value indicated that value is required\n 2. nullable - Boolena value indicated that value can be nullable\n ' self.required = required self.nullable = nullable
The __init__ method takes the following parameters: 1. required - Boolean value indicating that the value is required 2. nullable - Boolean value indicating that the value can be null
api.py
__init__
zkid18/scoring_api
0
python
def __init__(self, required, nullable): '\n The __init__ method takes the following parameters:\n 1. reuqired - Boolean value indicated that value is required\n 2. nullable - Boolena value indicated that value can be nullable\n ' self.required = required self.nullable = nullable
def __init__(self, required, nullable): '\n The __init__ method takes the following parameters:\n 1. reuqired - Boolean value indicated that value is required\n 2. nullable - Boolena value indicated that value can be nullable\n ' self.required = required self.nullable = nullable<|docstring|>The __init__ method takes the following parameters: 1. reuqired - Boolean value indicated that value is required 2. nullable - Boolena value indicated that value can be nullable<|endoftext|>
1c31073117ee41605423ac52f2dac22bbd0f4797bad28cb9f1f5321e67efb94e
def validate_value(self, value): '\n * email - строка, в которой есть @, опционально, может быть пустым\n ' validated_field_wrong = 'The email field requires @' valid_condition = ('@' in value) super().validate_value(value) if (not valid_condition): raise TypeError(validated_field_wrong)
* email - a string containing @, optional, may be empty
api.py
validate_value
zkid18/scoring_api
0
python
def validate_value(self, value): '\n \n ' validated_field_wrong = 'The email field requires @' valid_condition = ('@' in value) super().validate_value(value) if (not valid_condition): raise TypeError(validated_field_wrong)
def validate_value(self, value): '\n \n ' validated_field_wrong = 'The email field requires @' valid_condition = ('@' in value) super().validate_value(value) if (not valid_condition): raise TypeError(validated_field_wrong)<|docstring|>* email - строка, в которой есть @, опционально, может быть пустым<|endoftext|>
82e02a2355b2cf1d6dd1ad514b2224564d723dc7df6ab1924249879c0b8b62e8
def validate_value(self, value): '\n * phone - строка или число, длиной 11, начинается с 7, опционально, может быть пустым\n ' validated_field_wrong = 'The phone field must start from 7 and contain 11 characters' valid_condition = ((len(str(value)) == 11) and (str(value)[0] == '7')) super().validate_value(value) if (not valid_condition): raise TypeError(validated_field_wrong)
* phone - a string or a number, 11 characters long, starting with 7, optional, may be empty
api.py
validate_value
zkid18/scoring_api
0
python
def validate_value(self, value): '\n \n ' validated_field_wrong = 'The phone field must start from 7 and contain 11 characters' valid_condition = ((len(str(value)) == 11) and (str(value)[0] == '7')) super().validate_value(value) if (not valid_condition): raise TypeError(validated_field_wrong)
def validate_value(self, value): '\n \n ' validated_field_wrong = 'The phone field must start from 7 and contain 11 characters' valid_condition = ((len(str(value)) == 11) and (str(value)[0] == '7')) super().validate_value(value) if (not valid_condition): raise TypeError(validated_field_wrong)<|docstring|>* phone - строка или число, длиной 11, начинается с 7, опционально, может быть пустым<|endoftext|>
e57c25fa8292912f82f7c43bc93955a78f6b14846c3200b36a1d11d74f056743
def validate_value(self, value): '\n * gender - число 0, 1 или 2, опционально, может быть пустым\n ' super().validate_value(value) if (not isinstance(value, int)): validated_field_wrong = 'For gender only integers are accepted' raise TypeError(validated_field_wrong) elif (value not in [0, 1, 2]): validated_field_wrong = 'Only value in the range [0,1,2] are accepted' raise TypeError(validated_field_wrong)
* gender - a number 0, 1 or 2, optional, may be empty
api.py
validate_value
zkid18/scoring_api
0
python
def validate_value(self, value): '\n \n ' super().validate_value(value) if (not isinstance(value, int)): validated_field_wrong = 'For gender only integers are accepted' raise TypeError(validated_field_wrong) elif (value not in [0, 1, 2]): validated_field_wrong = 'Only value in the range [0,1,2] are accepted' raise TypeError(validated_field_wrong)
def validate_value(self, value): '\n \n ' super().validate_value(value) if (not isinstance(value, int)): validated_field_wrong = 'For gender only integers are accepted' raise TypeError(validated_field_wrong) elif (value not in [0, 1, 2]): validated_field_wrong = 'Only value in the range [0,1,2] are accepted' raise TypeError(validated_field_wrong)<|docstring|>* gender - число 0, 1 или 2, опционально, может быть пустым<|endoftext|>
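A standalone sketch that mirrors the three validation rules documented in this file (email must contain '@', phone must be 11 characters starting with '7', gender must be 0, 1, or 2) without reusing the api.py Field classes; the function name echoes the originals but is otherwise illustrative.

def validate(email, phone, gender):
    if '@' not in email:
        raise TypeError('The email field requires @')
    if not (len(str(phone)) == 11 and str(phone)[0] == '7'):
        raise TypeError('The phone field must start from 7 and contain 11 characters')
    if gender not in (0, 1, 2):
        raise TypeError('Only value in the range [0,1,2] are accepted')
    return True

print(validate('user@example.com', 79175002040, 1))  # True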
b264aa8eb82e50a6e1b0f43715298fb84b310f7349d4a1b127e67379ad3c1470
def get_request_id(self, headers): '\n Return random request_id that replicate the production\n ' return headers.get('HTTP_X_REQUEST_ID', uuid.uuid4().hex)
Return a random request_id that replicates production
api.py
get_request_id
zkid18/scoring_api
0
python
def get_request_id(self, headers): '\n \n ' return headers.get('HTTP_X_REQUEST_ID', uuid.uuid4().hex)
def get_request_id(self, headers): '\n \n ' return headers.get('HTTP_X_REQUEST_ID', uuid.uuid4().hex)<|docstring|>Return random request_id that replicate the production<|endoftext|>
463c68f41018006057d8c9d51410054cb82a8adea7589df8a4ef7b11e248637e
def __init__(self, model_Y_X, model_T_X, model_Z_X, prel_model_effect, model_TZ_X, h, optimizer='adam', training_options={'epochs': 30, 'batch_size': 32, 'validation_split': 0.1, 'callbacks': [keras.callbacks.EarlyStopping(patience=2, restore_best_weights=True)]}, cov_clip=0.1, n_splits=3, binary_instrument=False, binary_treatment=False, opt_reweighted=False): '\n Parameters\n ----------\n model_Y_X : model to predict E[Y | X]\n model_T_X : model to predict E[T | X]. In alt_fit, this model is also\n used to predict E[T | X, Z]\n model_Z_X : model to predict E[Z | X]\n prel_model_effect : model that estimates a preliminary version of the CATE\n (e.g. via DMLIV or other method)\n model_TZ_X : model to estimate E[T * Z | X]\n h : Model\n Keras model that takes X as an input and returns a layer of dimension d_y by d_t\n optimizer : keras optimizer\n training_options : dictionary of keras training options\n cov_clip : clipping of the covariate for regions with low "overlap",\n so as to reduce variance\n n_splits : number of splits to use in cross-fitting\n binary_instrument : whether to stratify cross-fitting splits by instrument\n binary_treatment : whether to stratify cross-fitting splits by treatment\n opt_reweighted : whether to reweight the samples to minimize variance. If True then\n model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from\n utilities can be used for any linear model to enable sample_weights). If True then\n assumes the model_effect is flexible enough to fit the true CATE model. Otherwise,\n it method will return a biased projection to the model_effect space, biased\n to give more weight on parts of the feature space where the instrument is strong.\n ' super(DeepDRIV, self).__init__(model_Y_X, model_T_X, model_Z_X, prel_model_effect, model_TZ_X, _KerasModel(h, optimizer=optimizer, training_options=training_options), cov_clip=cov_clip, n_splits=n_splits, binary_instrument=binary_instrument, binary_treatment=binary_treatment, opt_reweighted=opt_reweighted) return
Parameters ---------- model_Y_X : model to predict E[Y | X] model_T_X : model to predict E[T | X]. In alt_fit, this model is also used to predict E[T | X, Z] model_Z_X : model to predict E[Z | X] prel_model_effect : model that estimates a preliminary version of the CATE (e.g. via DMLIV or other method) model_TZ_X : model to estimate E[T * Z | X] h : Model Keras model that takes X as an input and returns a layer of dimension d_y by d_t optimizer : keras optimizer training_options : dictionary of keras training options cov_clip : clipping of the covariate for regions with low "overlap", so as to reduce variance n_splits : number of splits to use in cross-fitting binary_instrument : whether to stratify cross-fitting splits by instrument binary_treatment : whether to stratify cross-fitting splits by treatment opt_reweighted : whether to reweight the samples to minimize variance. If True then model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from utilities can be used for any linear model to enable sample_weights). If True, the model_effect is assumed to be flexible enough to fit the true CATE model. Otherwise, the method will return a biased projection to the model_effect space, biased to give more weight to parts of the feature space where the instrument is strong.
prototypes/dml_iv/deep_dr_iv.py
__init__
lwschm/EconML
1,846
python
def __init__(self, model_Y_X, model_T_X, model_Z_X, prel_model_effect, model_TZ_X, h, optimizer='adam', training_options={'epochs': 30, 'batch_size': 32, 'validation_split': 0.1, 'callbacks': [keras.callbacks.EarlyStopping(patience=2, restore_best_weights=True)]}, cov_clip=0.1, n_splits=3, binary_instrument=False, binary_treatment=False, opt_reweighted=False): '\n Parameters\n ----------\n model_Y_X : model to predict E[Y | X]\n model_T_X : model to predict E[T | X]. In alt_fit, this model is also\n used to predict E[T | X, Z]\n model_Z_X : model to predict E[Z | X]\n prel_model_effect : model that estimates a preliminary version of the CATE\n (e.g. via DMLIV or other method)\n model_TZ_X : model to estimate E[T * Z | X]\n h : Model\n Keras model that takes X as an input and returns a layer of dimension d_y by d_t\n optimizer : keras optimizer\n training_options : dictionary of keras training options\n cov_clip : clipping of the covariate for regions with low "overlap",\n so as to reduce variance\n n_splits : number of splits to use in cross-fitting\n binary_instrument : whether to stratify cross-fitting splits by instrument\n binary_treatment : whether to stratify cross-fitting splits by treatment\n opt_reweighted : whether to reweight the samples to minimize variance. If True then\n model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from\n utilities can be used for any linear model to enable sample_weights). If True then\n assumes the model_effect is flexible enough to fit the true CATE model. Otherwise,\n it method will return a biased projection to the model_effect space, biased\n to give more weight on parts of the feature space where the instrument is strong.\n ' super(DeepDRIV, self).__init__(model_Y_X, model_T_X, model_Z_X, prel_model_effect, model_TZ_X, _KerasModel(h, optimizer=optimizer, training_options=training_options), cov_clip=cov_clip, n_splits=n_splits, binary_instrument=binary_instrument, binary_treatment=binary_treatment, opt_reweighted=opt_reweighted) return
def __init__(self, model_Y_X, model_T_X, model_Z_X, prel_model_effect, model_TZ_X, h, optimizer='adam', training_options={'epochs': 30, 'batch_size': 32, 'validation_split': 0.1, 'callbacks': [keras.callbacks.EarlyStopping(patience=2, restore_best_weights=True)]}, cov_clip=0.1, n_splits=3, binary_instrument=False, binary_treatment=False, opt_reweighted=False): '\n Parameters\n ----------\n model_Y_X : model to predict E[Y | X]\n model_T_X : model to predict E[T | X]. In alt_fit, this model is also\n used to predict E[T | X, Z]\n model_Z_X : model to predict E[Z | X]\n prel_model_effect : model that estimates a preliminary version of the CATE\n (e.g. via DMLIV or other method)\n model_TZ_X : model to estimate E[T * Z | X]\n h : Model\n Keras model that takes X as an input and returns a layer of dimension d_y by d_t\n optimizer : keras optimizer\n training_options : dictionary of keras training options\n cov_clip : clipping of the covariate for regions with low "overlap",\n so as to reduce variance\n n_splits : number of splits to use in cross-fitting\n binary_instrument : whether to stratify cross-fitting splits by instrument\n binary_treatment : whether to stratify cross-fitting splits by treatment\n opt_reweighted : whether to reweight the samples to minimize variance. If True then\n model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from\n utilities can be used for any linear model to enable sample_weights). If True then\n assumes the model_effect is flexible enough to fit the true CATE model. Otherwise,\n it method will return a biased projection to the model_effect space, biased\n to give more weight on parts of the feature space where the instrument is strong.\n ' super(DeepDRIV, self).__init__(model_Y_X, model_T_X, model_Z_X, prel_model_effect, model_TZ_X, _KerasModel(h, optimizer=optimizer, training_options=training_options), cov_clip=cov_clip, n_splits=n_splits, binary_instrument=binary_instrument, binary_treatment=binary_treatment, opt_reweighted=opt_reweighted) return<|docstring|>Parameters ---------- model_Y_X : model to predict E[Y | X] model_T_X : model to predict E[T | X]. In alt_fit, this model is also used to predict E[T | X, Z] model_Z_X : model to predict E[Z | X] prel_model_effect : model that estimates a preliminary version of the CATE (e.g. via DMLIV or other method) model_TZ_X : model to estimate E[T * Z | X] h : Model Keras model that takes X as an input and returns a layer of dimension d_y by d_t optimizer : keras optimizer training_options : dictionary of keras training options cov_clip : clipping of the covariate for regions with low "overlap", so as to reduce variance n_splits : number of splits to use in cross-fitting binary_instrument : whether to stratify cross-fitting splits by instrument binary_treatment : whether to stratify cross-fitting splits by treatment opt_reweighted : whether to reweight the samples to minimize variance. If True then model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from utilities can be used for any linear model to enable sample_weights). If True then assumes the model_effect is flexible enough to fit the true CATE model. Otherwise, it method will return a biased projection to the model_effect space, biased to give more weight on parts of the feature space where the instrument is strong.<|endoftext|>
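A hedged sketch of the `h` argument described above: a Keras model mapping features X to an output of dimension d_y by d_t. The feature width and layer sizes are assumptions for illustration, not values taken from the repository.

import keras
from keras.layers import Dense, Reshape

d_x, d_y, d_t = 10, 1, 1  # assumed dimensions
h = keras.Sequential([
    Dense(64, activation='relu', input_shape=(d_x,)),
    Dense(64, activation='relu'),
    Dense(d_y * d_t),
    Reshape((d_y, d_t)),  # final layer of dimension d_y by d_t
])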
64251a518fa2609a210f59868657a7ed96275d0d94ad34084ded6cecd97b3a66
def __init__(self, model_Y_X, model_T_X, model_T_XZ, prel_model_effect, model_TZ_X, h, optimizer='adam', training_options={'epochs': 30, 'batch_size': 32, 'validation_split': 0.1, 'callbacks': [keras.callbacks.EarlyStopping(patience=2, restore_best_weights=True)]}, cov_clip=0.1, n_splits=3, binary_instrument=False, binary_treatment=False, opt_reweighted=False): '\n Parameters\n ----------\n model_Y_X : model to predict E[Y | X]\n model_T_X : model to predict E[T | X]. In alt_fit, this model is also\n used to predict E[T | X, Z]\n model_T_XZ : model to predict E[T | X, Z]\n model_theta : model that estimates a preliminary version of the CATE\n (e.g. via DMLIV or other method)\n model_TZ_X : model to estimate cov[T, E[T|X,Z] | X] = E[(T-E[T|X]) * (E[T|X,Z] - E[T|X]) | X].\n h : Model\n Keras model that takes X as an input and returns a layer of dimension d_y by d_t\n optimizer : keras optimizer\n training_options : dictionary of keras training options\n cov_clip : clipping of the covariate for regions with low "overlap",\n so as to reduce variance\n n_splits : number of splits to use in cross-fitting\n binary_instrument : whether to stratify cross-fitting splits by instrument\n binary_treatment : whether to stratify cross-fitting splits by treatment\n opt_reweighted : whether to reweight the samples to minimize variance. If True then\n model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from\n utilities can be used for any linear model to enable sample_weights). If True then\n assumes the model_effect is flexible enough to fit the true CATE model. Otherwise,\n it method will return a biased projection to the model_effect space, biased\n to give more weight on parts of the feature space where the instrument is strong.\n ' super(DeepProjectedDRIV, self).__init__(model_Y_X, model_T_X, model_T_XZ, prel_model_effect, model_TZ_X, _KerasModel(h, optimizer=optimizer, training_options=training_options), cov_clip=cov_clip, n_splits=n_splits, binary_instrument=binary_instrument, binary_treatment=binary_treatment, opt_reweighted=opt_reweighted) return
Parameters ---------- model_Y_X : model to predict E[Y | X] model_T_X : model to predict E[T | X]. In alt_fit, this model is also used to predict E[T | X, Z] model_T_XZ : model to predict E[T | X, Z] model_theta : model that estimates a preliminary version of the CATE (e.g. via DMLIV or other method) model_TZ_X : model to estimate cov[T, E[T|X,Z] | X] = E[(T-E[T|X]) * (E[T|X,Z] - E[T|X]) | X]. h : Model Keras model that takes X as an input and returns a layer of dimension d_y by d_t optimizer : keras optimizer training_options : dictionary of keras training options cov_clip : clipping of the covariate for regions with low "overlap", so as to reduce variance n_splits : number of splits to use in cross-fitting binary_instrument : whether to stratify cross-fitting splits by instrument binary_treatment : whether to stratify cross-fitting splits by treatment opt_reweighted : whether to reweight the samples to minimize variance. If True then model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from utilities can be used for any linear model to enable sample_weights). If True then assumes the model_effect is flexible enough to fit the true CATE model. Otherwise, it method will return a biased projection to the model_effect space, biased to give more weight on parts of the feature space where the instrument is strong.
prototypes/dml_iv/deep_dr_iv.py
__init__
lwschm/EconML
1,846
python
def __init__(self, model_Y_X, model_T_X, model_T_XZ, prel_model_effect, model_TZ_X, h, optimizer='adam', training_options={'epochs': 30, 'batch_size': 32, 'validation_split': 0.1, 'callbacks': [keras.callbacks.EarlyStopping(patience=2, restore_best_weights=True)]}, cov_clip=0.1, n_splits=3, binary_instrument=False, binary_treatment=False, opt_reweighted=False): '\n Parameters\n ----------\n model_Y_X : model to predict E[Y | X]\n model_T_X : model to predict E[T | X]. In alt_fit, this model is also\n used to predict E[T | X, Z]\n model_T_XZ : model to predict E[T | X, Z]\n model_theta : model that estimates a preliminary version of the CATE\n (e.g. via DMLIV or other method)\n model_TZ_X : model to estimate cov[T, E[T|X,Z] | X] = E[(T-E[T|X]) * (E[T|X,Z] - E[T|X]) | X].\n h : Model\n Keras model that takes X as an input and returns a layer of dimension d_y by d_t\n optimizer : keras optimizer\n training_options : dictionary of keras training options\n cov_clip : clipping of the covariate for regions with low "overlap",\n so as to reduce variance\n n_splits : number of splits to use in cross-fitting\n binary_instrument : whether to stratify cross-fitting splits by instrument\n binary_treatment : whether to stratify cross-fitting splits by treatment\n opt_reweighted : whether to reweight the samples to minimize variance. If True then\n model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from\n utilities can be used for any linear model to enable sample_weights). If True then\n assumes the model_effect is flexible enough to fit the true CATE model. Otherwise,\n it method will return a biased projection to the model_effect space, biased\n to give more weight on parts of the feature space where the instrument is strong.\n ' super(DeepProjectedDRIV, self).__init__(model_Y_X, model_T_X, model_T_XZ, prel_model_effect, model_TZ_X, _KerasModel(h, optimizer=optimizer, training_options=training_options), cov_clip=cov_clip, n_splits=n_splits, binary_instrument=binary_instrument, binary_treatment=binary_treatment, opt_reweighted=opt_reweighted) return
def __init__(self, model_Y_X, model_T_X, model_T_XZ, prel_model_effect, model_TZ_X, h, optimizer='adam', training_options={'epochs': 30, 'batch_size': 32, 'validation_split': 0.1, 'callbacks': [keras.callbacks.EarlyStopping(patience=2, restore_best_weights=True)]}, cov_clip=0.1, n_splits=3, binary_instrument=False, binary_treatment=False, opt_reweighted=False): '\n Parameters\n ----------\n model_Y_X : model to predict E[Y | X]\n model_T_X : model to predict E[T | X]. In alt_fit, this model is also\n used to predict E[T | X, Z]\n model_T_XZ : model to predict E[T | X, Z]\n model_theta : model that estimates a preliminary version of the CATE\n (e.g. via DMLIV or other method)\n model_TZ_X : model to estimate cov[T, E[T|X,Z] | X] = E[(T-E[T|X]) * (E[T|X,Z] - E[T|X]) | X].\n h : Model\n Keras model that takes X as an input and returns a layer of dimension d_y by d_t\n optimizer : keras optimizer\n training_options : dictionary of keras training options\n cov_clip : clipping of the covariate for regions with low "overlap",\n so as to reduce variance\n n_splits : number of splits to use in cross-fitting\n binary_instrument : whether to stratify cross-fitting splits by instrument\n binary_treatment : whether to stratify cross-fitting splits by treatment\n opt_reweighted : whether to reweight the samples to minimize variance. If True then\n model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from\n utilities can be used for any linear model to enable sample_weights). If True then\n assumes the model_effect is flexible enough to fit the true CATE model. Otherwise,\n it method will return a biased projection to the model_effect space, biased\n to give more weight on parts of the feature space where the instrument is strong.\n ' super(DeepProjectedDRIV, self).__init__(model_Y_X, model_T_X, model_T_XZ, prel_model_effect, model_TZ_X, _KerasModel(h, optimizer=optimizer, training_options=training_options), cov_clip=cov_clip, n_splits=n_splits, binary_instrument=binary_instrument, binary_treatment=binary_treatment, opt_reweighted=opt_reweighted) return<|docstring|>Parameters ---------- model_Y_X : model to predict E[Y | X] model_T_X : model to predict E[T | X]. In alt_fit, this model is also used to predict E[T | X, Z] model_T_XZ : model to predict E[T | X, Z] model_theta : model that estimates a preliminary version of the CATE (e.g. via DMLIV or other method) model_TZ_X : model to estimate cov[T, E[T|X,Z] | X] = E[(T-E[T|X]) * (E[T|X,Z] - E[T|X]) | X]. h : Model Keras model that takes X as an input and returns a layer of dimension d_y by d_t optimizer : keras optimizer training_options : dictionary of keras training options cov_clip : clipping of the covariate for regions with low "overlap", so as to reduce variance n_splits : number of splits to use in cross-fitting binary_instrument : whether to stratify cross-fitting splits by instrument binary_treatment : whether to stratify cross-fitting splits by treatment opt_reweighted : whether to reweight the samples to minimize variance. If True then model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from utilities can be used for any linear model to enable sample_weights). If True then assumes the model_effect is flexible enough to fit the true CATE model. Otherwise, it method will return a biased projection to the model_effect space, biased to give more weight on parts of the feature space where the instrument is strong.<|endoftext|>
a291ef1402b337ca1684a0c6bdd97589aa227d0b304528e009a7fe11e04ef6a4
def __init__(self, model_Y_X, model_T_XZ, h, optimizer='adam', training_options={'epochs': 30, 'batch_size': 32, 'validation_split': 0.1, 'callbacks': [keras.callbacks.EarlyStopping(patience=2, restore_best_weights=True)]}, final_model_effect=None, cov_clip=0.1, n_splits=3, opt_reweighted=False): '\n Parameters\n ----------\n model_Y_X : model to predict E[Y | X]\n model_T_XZ : model to predict E[T | X, Z]\n h : Model\n Keras model that takes X as an input and returns a layer of dimension d_y by d_t\n optimizer : keras optimizer\n training_options : dictionary of keras training options\n final_model_effect : a final model for the CATE and projections. If None, then\n flexible_model_effect is also used as a final model\n cov_clip : clipping of the covariate for regions with low "overlap",\n so as to reduce variance\n n_splits : number of splits to use in cross-fitting\n opt_reweighted : whether to reweight the samples to minimize variance. If True then\n final_model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from\n utilities can be used for any linear model to enable sample_weights). If True then\n assumes the final_model_effect is flexible enough to fit the true CATE model. Otherwise,\n it method will return a biased projection to the model_effect space, biased\n to give more weight on parts of the feature space where the instrument is strong.\n ' flexible_model_effect = _KerasModel(h, optimizer=optimizer, training_options=training_options) super(DeepIntentToTreatDRIV, self).__init__(model_Y_X, model_T_XZ, flexible_model_effect, final_model_effect=final_model_effect, cov_clip=cov_clip, n_splits=n_splits, opt_reweighted=opt_reweighted) return
Parameters ---------- model_Y_X : model to predict E[Y | X] model_T_XZ : model to predict E[T | X, Z] h : Model Keras model that takes X as an input and returns a layer of dimension d_y by d_t optimizer : keras optimizer training_options : dictionary of keras training options final_model_effect : a final model for the CATE and projections. If None, then flexible_model_effect is also used as a final model cov_clip : clipping of the covariate for regions with low "overlap", so as to reduce variance n_splits : number of splits to use in cross-fitting opt_reweighted : whether to reweight the samples to minimize variance. If True then final_model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from utilities can be used for any linear model to enable sample_weights). If True then assumes the final_model_effect is flexible enough to fit the true CATE model. Otherwise, it method will return a biased projection to the model_effect space, biased to give more weight on parts of the feature space where the instrument is strong.
prototypes/dml_iv/deep_dr_iv.py
__init__
lwschm/EconML
1,846
python
def __init__(self, model_Y_X, model_T_XZ, h, optimizer='adam', training_options={'epochs': 30, 'batch_size': 32, 'validation_split': 0.1, 'callbacks': [keras.callbacks.EarlyStopping(patience=2, restore_best_weights=True)]}, final_model_effect=None, cov_clip=0.1, n_splits=3, opt_reweighted=False): '\n Parameters\n ----------\n model_Y_X : model to predict E[Y | X]\n model_T_XZ : model to predict E[T | X, Z]\n h : Model\n Keras model that takes X as an input and returns a layer of dimension d_y by d_t\n optimizer : keras optimizer\n training_options : dictionary of keras training options\n final_model_effect : a final model for the CATE and projections. If None, then\n flexible_model_effect is also used as a final model\n cov_clip : clipping of the covariate for regions with low "overlap",\n so as to reduce variance\n n_splits : number of splits to use in cross-fitting\n opt_reweighted : whether to reweight the samples to minimize variance. If True then\n final_model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from\n utilities can be used for any linear model to enable sample_weights). If True then\n assumes the final_model_effect is flexible enough to fit the true CATE model. Otherwise,\n it method will return a biased projection to the model_effect space, biased\n to give more weight on parts of the feature space where the instrument is strong.\n ' flexible_model_effect = _KerasModel(h, optimizer=optimizer, training_options=training_options) super(DeepIntentToTreatDRIV, self).__init__(model_Y_X, model_T_XZ, flexible_model_effect, final_model_effect=final_model_effect, cov_clip=cov_clip, n_splits=n_splits, opt_reweighted=opt_reweighted) return
def __init__(self, model_Y_X, model_T_XZ, h, optimizer='adam', training_options={'epochs': 30, 'batch_size': 32, 'validation_split': 0.1, 'callbacks': [keras.callbacks.EarlyStopping(patience=2, restore_best_weights=True)]}, final_model_effect=None, cov_clip=0.1, n_splits=3, opt_reweighted=False): '\n Parameters\n ----------\n model_Y_X : model to predict E[Y | X]\n model_T_XZ : model to predict E[T | X, Z]\n h : Model\n Keras model that takes X as an input and returns a layer of dimension d_y by d_t\n optimizer : keras optimizer\n training_options : dictionary of keras training options\n final_model_effect : a final model for the CATE and projections. If None, then\n flexible_model_effect is also used as a final model\n cov_clip : clipping of the covariate for regions with low "overlap",\n so as to reduce variance\n n_splits : number of splits to use in cross-fitting\n opt_reweighted : whether to reweight the samples to minimize variance. If True then\n final_model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from\n utilities can be used for any linear model to enable sample_weights). If True then\n assumes the final_model_effect is flexible enough to fit the true CATE model. Otherwise,\n it method will return a biased projection to the model_effect space, biased\n to give more weight on parts of the feature space where the instrument is strong.\n ' flexible_model_effect = _KerasModel(h, optimizer=optimizer, training_options=training_options) super(DeepIntentToTreatDRIV, self).__init__(model_Y_X, model_T_XZ, flexible_model_effect, final_model_effect=final_model_effect, cov_clip=cov_clip, n_splits=n_splits, opt_reweighted=opt_reweighted) return<|docstring|>Parameters ---------- model_Y_X : model to predict E[Y | X] model_T_XZ : model to predict E[T | X, Z] h : Model Keras model that takes X as an input and returns a layer of dimension d_y by d_t optimizer : keras optimizer training_options : dictionary of keras training options final_model_effect : a final model for the CATE and projections. If None, then flexible_model_effect is also used as a final model cov_clip : clipping of the covariate for regions with low "overlap", so as to reduce variance n_splits : number of splits to use in cross-fitting opt_reweighted : whether to reweight the samples to minimize variance. If True then final_model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from utilities can be used for any linear model to enable sample_weights). If True then assumes the final_model_effect is flexible enough to fit the true CATE model. Otherwise, it method will return a biased projection to the model_effect space, biased to give more weight on parts of the feature space where the instrument is strong.<|endoftext|>
5ca88a2fd89a387c49c84d578d7785ed8809caf515c9067a9cdae80a36825969
def is_valid(self, scopes=None): '\n Checks if the access token is valid.\n\n :param scopes: An iterable containing the scopes to check or None\n ' return ((not self.is_expired()) and self.allow_scopes(scopes))
Checks if the access token is valid. :param scopes: An iterable containing the scopes to check or None
oauth2_provider_jwt/authentication.py
is_valid
ericstone57/django-oauth-toolkit-jwt
33
python
def is_valid(self, scopes=None): '\n Checks if the access token is valid.\n\n :param scopes: An iterable containing the scopes to check or None\n ' return ((not self.is_expired()) and self.allow_scopes(scopes))
def is_valid(self, scopes=None): '\n Checks if the access token is valid.\n\n :param scopes: An iterable containing the scopes to check or None\n ' return ((not self.is_expired()) and self.allow_scopes(scopes))<|docstring|>Checks if the access token is valid. :param scopes: An iterable containing the scopes to check or None<|endoftext|>
a70fabf0aee5ca5e2801bf8d359f9e8523161ff8bf315bc8d5959c40bee8803b
def is_expired(self): '\n Check token expiration with timezone awareness\n ' return False
Check token expiration with timezone awareness
oauth2_provider_jwt/authentication.py
is_expired
ericstone57/django-oauth-toolkit-jwt
33
python
def is_expired(self): '\n \n ' return False
def is_expired(self): '\n \n ' return False<|docstring|>Check token expiration with timezone awareness<|endoftext|>
cb63c5adc9a3d274dab125791beb7650a89d6d79399a3009d8a585bcfeaaf653
def allow_scopes(self, scopes): '\n Check if the token allows the provided scopes\n\n :param scopes: An iterable containing the scopes to check\n ' if (not scopes): return True provided_scopes = set(self.scope.split()) resource_scopes = set(scopes) return resource_scopes.issubset(provided_scopes)
Check if the token allows the provided scopes :param scopes: An iterable containing the scopes to check
oauth2_provider_jwt/authentication.py
allow_scopes
ericstone57/django-oauth-toolkit-jwt
33
python
def allow_scopes(self, scopes): '\n Check if the token allows the provided scopes\n\n :param scopes: An iterable containing the scopes to check\n ' if (not scopes): return True provided_scopes = set(self.scope.split()) resource_scopes = set(scopes) return resource_scopes.issubset(provided_scopes)
def allow_scopes(self, scopes): '\n Check if the token allows the provided scopes\n\n :param scopes: An iterable containing the scopes to check\n ' if (not scopes): return True provided_scopes = set(self.scope.split()) resource_scopes = set(scopes) return resource_scopes.issubset(provided_scopes)<|docstring|>Check if the token allows the provided scopes :param scopes: An iterable containing the scopes to check<|endoftext|>
4b90f6143570c91a728e5f247203e7e083721c227f7560d6fc66bff5e2158683
def authenticate(self, request): '\n Returns a two-tuple of `User` and token if a valid signature has been\n supplied using JWT-based authentication. Otherwise returns `None`.\n ' jwt_value = self._get_jwt_value(request) if (jwt_value is None): return None try: payload = decode_jwt(jwt_value) except jwt.ExpiredSignatureError: msg = 'Signature has expired.' raise exceptions.AuthenticationFailed(msg) except jwt.DecodeError: msg = 'Error decoding signature.' raise exceptions.AuthenticationFailed(msg) except jwt.InvalidTokenError: raise exceptions.AuthenticationFailed() self._add_session_details(request, payload) user = self.authenticate_credentials(payload) return (user, JwtToken(payload))
Returns a two-tuple of `User` and token if a valid signature has been supplied using JWT-based authentication. Otherwise returns `None`.
oauth2_provider_jwt/authentication.py
authenticate
ericstone57/django-oauth-toolkit-jwt
33
python
def authenticate(self, request): '\n Returns a two-tuple of `User` and token if a valid signature has been\n supplied using JWT-based authentication. Otherwise returns `None`.\n ' jwt_value = self._get_jwt_value(request) if (jwt_value is None): return None try: payload = decode_jwt(jwt_value) except jwt.ExpiredSignatureError: msg = 'Signature has expired.' raise exceptions.AuthenticationFailed(msg) except jwt.DecodeError: msg = 'Error decoding signature.' raise exceptions.AuthenticationFailed(msg) except jwt.InvalidTokenError: raise exceptions.AuthenticationFailed() self._add_session_details(request, payload) user = self.authenticate_credentials(payload) return (user, JwtToken(payload))
def authenticate(self, request): '\n Returns a two-tuple of `User` and token if a valid signature has been\n supplied using JWT-based authentication. Otherwise returns `None`.\n ' jwt_value = self._get_jwt_value(request) if (jwt_value is None): return None try: payload = decode_jwt(jwt_value) except jwt.ExpiredSignatureError: msg = 'Signature has expired.' raise exceptions.AuthenticationFailed(msg) except jwt.DecodeError: msg = 'Error decoding signature.' raise exceptions.AuthenticationFailed(msg) except jwt.InvalidTokenError: raise exceptions.AuthenticationFailed() self._add_session_details(request, payload) user = self.authenticate_credentials(payload) return (user, JwtToken(payload))<|docstring|>Returns a two-tuple of `User` and token if a valid signature has been supplied using JWT-based authentication. Otherwise returns `None`.<|endoftext|>
9932d4ec721ec8fe49d7c51bcccb55060dda0e898ff609f87521d2f78b647aa3
def authenticate_credentials(self, payload): "\n Returns an active user that matches the payload's user id and email.\n " if getattr(settings, 'JWT_AUTH_DISABLED', False): return AnonymousUser() User = get_user_model() username = payload.get(getattr(settings, 'JWT_ID_ATTRIBUTE')) if (not username): msg = 'Invalid payload.' raise exceptions.AuthenticationFailed(msg) try: kwargs = {getattr(settings, 'JWT_ID_ATTRIBUTE'): username} user = User.objects.get(**kwargs) except User.DoesNotExist: msg = 'Invalid signature.' raise exceptions.AuthenticationFailed(msg) if (not user.is_active): msg = 'User account is disabled.' raise exceptions.AuthenticationFailed(msg) return user
Returns an active user that matches the payload's user id and email.
oauth2_provider_jwt/authentication.py
authenticate_credentials
ericstone57/django-oauth-toolkit-jwt
33
python
def authenticate_credentials(self, payload): "\n \n " if getattr(settings, 'JWT_AUTH_DISABLED', False): return AnonymousUser() User = get_user_model() username = payload.get(getattr(settings, 'JWT_ID_ATTRIBUTE')) if (not username): msg = 'Invalid payload.' raise exceptions.AuthenticationFailed(msg) try: kwargs = {getattr(settings, 'JWT_ID_ATTRIBUTE'): username} user = User.objects.get(**kwargs) except User.DoesNotExist: msg = 'Invalid signature.' raise exceptions.AuthenticationFailed(msg) if (not user.is_active): msg = 'User account is disabled.' raise exceptions.AuthenticationFailed(msg) return user
def authenticate_credentials(self, payload): "\n \n " if getattr(settings, 'JWT_AUTH_DISABLED', False): return AnonymousUser() User = get_user_model() username = payload.get(getattr(settings, 'JWT_ID_ATTRIBUTE')) if (not username): msg = 'Invalid payload.' raise exceptions.AuthenticationFailed(msg) try: kwargs = {getattr(settings, 'JWT_ID_ATTRIBUTE'): username} user = User.objects.get(**kwargs) except User.DoesNotExist: msg = 'Invalid signature.' raise exceptions.AuthenticationFailed(msg) if (not user.is_active): msg = 'User account is disabled.' raise exceptions.AuthenticationFailed(msg) return user<|docstring|>Returns an active user that matches the payload's user id and email.<|endoftext|>
c2c3e1a47c38f1b404adcee0f50c142cb60689df6263d342732afd80e0539825
def _add_session_details(self, request, payload): '\n Adds to the session payload details so they can be used anytime.\n ' try: items = payload.iteritems() except AttributeError: items = payload.items() for (k, v) in items: if (k not in ('iat', 'exp')): request.session['jwt_{}'.format(k)] = v
Adds to the session payload details so they can be used anytime.
oauth2_provider_jwt/authentication.py
_add_session_details
ericstone57/django-oauth-toolkit-jwt
33
python
def _add_session_details(self, request, payload): '\n \n ' try: items = payload.iteritems() except AttributeError: items = payload.items() for (k, v) in items: if (k not in ('iat', 'exp')): request.session['jwt_{}'.format(k)] = v
def _add_session_details(self, request, payload): '\n \n ' try: items = payload.iteritems() except AttributeError: items = payload.items() for (k, v) in items: if (k not in ('iat', 'exp')): request.session['jwt_{}'.format(k)] = v<|docstring|>Adds to the session payload details so they can be used anytime.<|endoftext|>
5434b8781014482fad382082529cc11197d8d4230516bb59d47b28d43e0af758
def authenticate_header(self, _request): '\n Return a string to be used as the value of the `WWW-Authenticate`\n header in a `401 Unauthenticated` response, or `None` if the\n authentication scheme should return `403 Permission Denied` responses.\n ' auth_header_prefix = getattr(settings, 'JWT_AUTH_HEADER_PREFIX', 'JWT') return '{0} realm="{1}"'.format(auth_header_prefix, self.www_authenticate_realm)
Return a string to be used as the value of the `WWW-Authenticate` header in a `401 Unauthenticated` response, or `None` if the authentication scheme should return `403 Permission Denied` responses.
oauth2_provider_jwt/authentication.py
authenticate_header
ericstone57/django-oauth-toolkit-jwt
33
python
def authenticate_header(self, _request): '\n Return a string to be used as the value of the `WWW-Authenticate`\n header in a `401 Unauthenticated` response, or `None` if the\n authentication scheme should return `403 Permission Denied` responses.\n ' auth_header_prefix = getattr(settings, 'JWT_AUTH_HEADER_PREFIX', 'JWT') return '{0} realm="{1}"'.format(auth_header_prefix, self.www_authenticate_realm)
def authenticate_header(self, _request): '\n Return a string to be used as the value of the `WWW-Authenticate`\n header in a `401 Unauthenticated` response, or `None` if the\n authentication scheme should return `403 Permission Denied` responses.\n ' auth_header_prefix = getattr(settings, 'JWT_AUTH_HEADER_PREFIX', 'JWT') return '{0} realm="{1}"'.format(auth_header_prefix, self.www_authenticate_realm)<|docstring|>Return a string to be used as the value of the `WWW-Authenticate` header in a `401 Unauthenticated` response, or `None` if the authentication scheme should return `403 Permission Denied` responses.<|endoftext|>
f546f78fd2c77746a1f3a22b992d00542617c32320c1b66e86e038978e10723b
def check_distributions(dists: List[Union[(rv_continuous, None)]], are_stochastic: bool): '\n    Checks that the distribution given in input respects the necessary conditions.\n    :param dists: a list of distributions\n    :param are_stochastic: to check if the distributions are stochastic.\n    ' assert (dists.count(None) in [0, len(dists)]) if (dists[0] is not None): if are_stochastic: assert all(((type(dist.dist) != deterministic_gen) for dist in dists)) else: assert all(((type(dist.dist) == deterministic_gen) for dist in dists))
Checks that the distribution given in input respects the necessary conditions. :param dists: a list of distributions :param are_stochastic: to check if the distributions are stochastic.
colosseum/utils/mdps.py
check_distributions
MichelangeloConserva/Colosseum
0
python
def check_distributions(dists: List[Union[(rv_continuous, None)]], are_stochastic: bool): '\n    Checks that the distribution given in input respects the necessary conditions.\n    :param dists: a list of distributions\n    :param are_stochastic: to check if the distributions are stochastic.\n    ' assert (dists.count(None) in [0, len(dists)]) if (dists[0] is not None): if are_stochastic: assert all(((type(dist.dist) != deterministic_gen) for dist in dists)) else: assert all(((type(dist.dist) == deterministic_gen) for dist in dists))
def check_distributions(dists: List[Union[(rv_continuous, None)]], are_stochastic: bool): '\n    Checks that the distribution given in input respects the necessary conditions.\n    :param dists: a list of distributions\n    :param are_stochastic: to check if the distributions are stochastic.\n    ' assert (dists.count(None) in [0, len(dists)]) if (dists[0] is not None): if are_stochastic: assert all(((type(dist.dist) != deterministic_gen) for dist in dists)) else: assert all(((type(dist.dist) == deterministic_gen) for dist in dists))<|docstring|>Checks that the distribution given in input respects the necessary conditions. :param dists: a list of distributions :param are_stochastic: to check if the distributions are stochastic.<|endoftext|>
c14b6834976806521115db1e374f25510a389142fc4f7d1d4ff0d1452ab212df
def save_viterbi(path, viterbi, valuefunc, derivation2str, get_projection): '\n\n :param path: where to save\n :param viterbi: the best derivation\n :param omega_d: a function over derivations\n :param get_projection: a function which returns a projection of a derivation\n ' with smart_wopen(path) as out: print('# score\tyield\tderivation', file=out) print('{0}\t{1}\t{2}'.format(valuefunc(viterbi.derivation), get_projection(viterbi.derivation), derivation2str(viterbi.derivation)), file=out)
:param path: where to save :param viterbi: the best derivation :param omega_d: a function over derivations :param get_projection: a function which returns a projection of a derivation
grasp/io/results.py
save_viterbi
wilkeraziz/grasp
9
python
def save_viterbi(path, viterbi, valuefunc, derivation2str, get_projection): '\n\n :param path: where to save\n :param viterbi: the best derivation\n :param omega_d: a function over derivations\n :param get_projection: a function which returns a projection of a derivation\n ' with smart_wopen(path) as out: print('# score\tyield\tderivation', file=out) print('{0}\t{1}\t{2}'.format(valuefunc(viterbi.derivation), get_projection(viterbi.derivation), derivation2str(viterbi.derivation)), file=out)
def save_viterbi(path, viterbi, valuefunc, derivation2str, get_projection): '\n\n :param path: where to save\n :param viterbi: the best derivation\n :param omega_d: a function over derivations\n :param get_projection: a function which returns a projection of a derivation\n ' with smart_wopen(path) as out: print('# score\tyield\tderivation', file=out) print('{0}\t{1}\t{2}'.format(valuefunc(viterbi.derivation), get_projection(viterbi.derivation), derivation2str(viterbi.derivation)), file=out)<|docstring|>:param path: where to save :param viterbi: the best derivation :param omega_d: a function over derivations :param get_projection: a function which returns a projection of a derivation<|endoftext|>
feef79c515a6c494fbbe193ab42f16d4fee5d2c91361dd974613b41295749118
def save_kbest(path, derivations, get_projection, derivation2str=DerivationYield.derivation): '\n\n :param path: where to save\n :param derivations: sorted list of derivations\n :param omega_d: a function over derivations\n :param get_projection: a function which returns a projection of a derivation\n ' with smart_wopen(path) as out: print('# score\tyield\tderivation', file=out) for d in derivations: print('{0}\t{1}\t{2}'.format(d.value, get_projection(d), derivation2str(d)), file=out)
:param path: where to save :param derivations: sorted list of derivations :param omega_d: a function over derivations :param get_projection: a function which returns a projection of a derivation
grasp/io/results.py
save_kbest
wilkeraziz/grasp
9
python
def save_kbest(path, derivations, get_projection, derivation2str=DerivationYield.derivation): '\n\n :param path: where to save\n :param derivations: sorted list of derivations\n :param omega_d: a function over derivations\n :param get_projection: a function which returns a projection of a derivation\n ' with smart_wopen(path) as out: print('# score\tyield\tderivation', file=out) for d in derivations: print('{0}\t{1}\t{2}'.format(d.value, get_projection(d), derivation2str(d)), file=out)
def save_kbest(path, derivations, get_projection, derivation2str=DerivationYield.derivation): '\n\n :param path: where to save\n :param derivations: sorted list of derivations\n :param omega_d: a function over derivations\n :param get_projection: a function which returns a projection of a derivation\n ' with smart_wopen(path) as out: print('# score\tyield\tderivation', file=out) for d in derivations: print('{0}\t{1}\t{2}'.format(d.value, get_projection(d), derivation2str(d)), file=out)<|docstring|>:param path: where to save :param derivations: sorted list of derivations :param omega_d: a function over derivations :param get_projection: a function which returns a projection of a derivation<|endoftext|>
0d94265f23bd87e9c0af33b7caffff172932652fa91b346379ba07bedf1a094a
def save_mc_derivations(path, groups, inside, valuefunc, derivation2str, semiring=semiring.inside): '\n\n :param path: where to save\n :param samples: sorted list of samples (obtained by group_by_identity)\n :param inside: inside at the root\n :param valuefunc: function to compute the value of the derivation\n :param derivation2str: how to obtain a string from a derivation\n :param semiring: semiring used to normalise probabilities\n ' with smart_wopen(path) as out: total = sum((group.count for group in groups)) print('# MC samples={0} inside={1} semiring={2}'.format(total, inside, semiring), file=out) print('# exact\testimate\tcount\tscore\tderivation', file=out) for group in groups: score = valuefunc(group.key) prob = semiring.as_real(semiring.divide(score, inside)) print('{0}\t{1}\t{2}\t{3}\t{4}'.format(prob, (group.count / total), group.count, score, derivation2str(group.key)), file=out)
:param path: where to save :param samples: sorted list of samples (obtained by group_by_identity) :param inside: inside at the root :param valuefunc: function to compute the value of the derivation :param derivation2str: how to obtain a string from a derivation :param semiring: semiring used to normalise probabilities
grasp/io/results.py
save_mc_derivations
wilkeraziz/grasp
9
python
def save_mc_derivations(path, groups, inside, valuefunc, derivation2str, semiring=semiring.inside): '\n\n :param path: where to save\n :param samples: sorted list of samples (obtained by group_by_identity)\n :param inside: inside at the root\n :param valuefunc: function to compute the value of the derivation\n :param derivation2str: how to obtain a string from a derivation\n :param semiring: semiring used to normalise probabilities\n ' with smart_wopen(path) as out: total = sum((group.count for group in groups)) print('# MC samples={0} inside={1} semiring={2}'.format(total, inside, semiring), file=out) print('# exact\testimate\tcount\tscore\tderivation', file=out) for group in groups: score = valuefunc(group.key) prob = semiring.as_real(semiring.divide(score, inside)) print('{0}\t{1}\t{2}\t{3}\t{4}'.format(prob, (group.count / total), group.count, score, derivation2str(group.key)), file=out)
def save_mc_derivations(path, groups, inside, valuefunc, derivation2str, semiring=semiring.inside): '\n\n :param path: where to save\n :param samples: sorted list of samples (obtained by group_by_identity)\n :param inside: inside at the root\n :param valuefunc: function to compute the value of the derivation\n :param derivation2str: how to obtain a string from a derivation\n :param semiring: semiring used to normalise probabilities\n ' with smart_wopen(path) as out: total = sum((group.count for group in groups)) print('# MC samples={0} inside={1} semiring={2}'.format(total, inside, semiring), file=out) print('# exact\testimate\tcount\tscore\tderivation', file=out) for group in groups: score = valuefunc(group.key) prob = semiring.as_real(semiring.divide(score, inside)) print('{0}\t{1}\t{2}\t{3}\t{4}'.format(prob, (group.count / total), group.count, score, derivation2str(group.key)), file=out)<|docstring|>:param path: where to save :param samples: sorted list of samples (obtained by group_by_identity) :param inside: inside at the root :param valuefunc: function to compute the value of the derivation :param derivation2str: how to obtain a string from a derivation :param semiring: semiring used to normalise probabilities<|endoftext|>
22b1e942726aff4c2a29f7a28fd4c2bac18d4f0ccd4d5ae71c621776a1dc3dbb
def save_mc_yields(path, groups): '\n :param path: where to save\n :param samples: sorted list of samples (obtained by group_by_projection)\n ' with smart_wopen(path) as out: total = sum((sample.count for sample in groups)) print('# MC samples={0}'.format(total), file=out) print('# estimate\tcount\tderivations\tyield', file=out) for (i, group) in enumerate(groups, 1): print('{0}\t{1}\t{2}\t{3}'.format((group.count / total), group.count, len(set(group.values)), group.key), file=out)
:param path: where to save :param samples: sorted list of samples (obtained by group_by_projection)
grasp/io/results.py
save_mc_yields
wilkeraziz/grasp
9
python
def save_mc_yields(path, groups): '\n :param path: where to save\n :param samples: sorted list of samples (obtained by group_by_projection)\n ' with smart_wopen(path) as out: total = sum((sample.count for sample in groups)) print('# MC samples={0}'.format(total), file=out) print('# estimate\tcount\tderivations\tyield', file=out) for (i, group) in enumerate(groups, 1): print('{0}\t{1}\t{2}\t{3}'.format((group.count / total), group.count, len(set(group.values)), group.key), file=out)
def save_mc_yields(path, groups): '\n :param path: where to save\n :param samples: sorted list of samples (obtained by group_by_projection)\n ' with smart_wopen(path) as out: total = sum((sample.count for sample in groups)) print('# MC samples={0}'.format(total), file=out) print('# estimate\tcount\tderivations\tyield', file=out) for (i, group) in enumerate(groups, 1): print('{0}\t{1}\t{2}\t{3}'.format((group.count / total), group.count, len(set(group.values)), group.key), file=out)<|docstring|>:param path: where to save :param samples: sorted list of samples (obtained by group_by_projection)<|endoftext|>
b489a92dc8c86672a2902b8e3a1938ca69ee18e99aeb383fbb2c5d5485096f87
def save_mcmc_derivations(path, groups, valuefunc, derivation2str, compfunc=None): '\n\n :param path: where to save\n :param samples: sorted list of samples (obtained by group_by_identity)\n :param valuefunc: compute the value of a derivation\n :param omega_d: a function over derivations\n ' with smart_wopen(path) as out: total = sum((group.count for group in groups)) print('# MCMC samples={0}'.format(total), file=out) if (compfunc is not None): print('# estimate\tcount\tscore\tderivation\tfeatures', file=out) for (i, group) in enumerate(groups, 1): sample = group.key print('{0}\t{1}\t{2}\t{3}\t{4}'.format((group.count / total), group.count, valuefunc(sample), derivation2str(sample), compfunc(sample)), file=out) else: print('# estimate\tcount\tscore\tderivation', file=out) for (i, group) in enumerate(groups, 1): sample = group.key print('{0}\t{1}\t{2}\t{3}'.format((group.count / total), group.count, valuefunc(sample), derivation2str(sample)), file=out)
:param path: where to save :param samples: sorted list of samples (obtained by group_by_identity) :param valuefunc: compute the value of a derivation :param omega_d: a function over derivations
grasp/io/results.py
save_mcmc_derivations
wilkeraziz/grasp
9
python
def save_mcmc_derivations(path, groups, valuefunc, derivation2str, compfunc=None): '\n\n :param path: where to save\n :param samples: sorted list of samples (obtained by group_by_identity)\n :param valuefunc: compute the value of a derivation\n :param omega_d: a function over derivations\n ' with smart_wopen(path) as out: total = sum((group.count for group in groups)) print('# MCMC samples={0}'.format(total), file=out) if (compfunc is not None): print('# estimate\tcount\tscore\tderivation\tfeatures', file=out) for (i, group) in enumerate(groups, 1): sample = group.key print('{0}\t{1}\t{2}\t{3}\t{4}'.format((group.count / total), group.count, valuefunc(sample), derivation2str(sample), compfunc(sample)), file=out) else: print('# estimate\tcount\tscore\tderivation', file=out) for (i, group) in enumerate(groups, 1): sample = group.key print('{0}\t{1}\t{2}\t{3}'.format((group.count / total), group.count, valuefunc(sample), derivation2str(sample)), file=out)
def save_mcmc_derivations(path, groups, valuefunc, derivation2str, compfunc=None): '\n\n :param path: where to save\n :param samples: sorted list of samples (obtained by group_by_identity)\n :param valuefunc: compute the value of a derivation\n :param omega_d: a function over derivations\n ' with smart_wopen(path) as out: total = sum((group.count for group in groups)) print('# MCMC samples={0}'.format(total), file=out) if (compfunc is not None): print('# estimate\tcount\tscore\tderivation\tfeatures', file=out) for (i, group) in enumerate(groups, 1): sample = group.key print('{0}\t{1}\t{2}\t{3}\t{4}'.format((group.count / total), group.count, valuefunc(sample), derivation2str(sample), compfunc(sample)), file=out) else: print('# estimate\tcount\tscore\tderivation', file=out) for (i, group) in enumerate(groups, 1): sample = group.key print('{0}\t{1}\t{2}\t{3}'.format((group.count / total), group.count, valuefunc(sample), derivation2str(sample)), file=out)<|docstring|>:param path: where to save :param samples: sorted list of samples (obtained by group_by_identity) :param valuefunc: compute the value of a derivation :param omega_d: a function over derivations<|endoftext|>
4ca713912122f500419227acb6f207c450b5f5aac7010e1cb902324d6ed588f5
def save_mcmc_yields(path, groups): '\n\n :param path: where to save\n :param groups: sorted list of sampled (obtained by group_by_projection)\n ' with smart_wopen(path) as out: total = sum((group.count for group in groups)) print('# MCMC samples={0}\n# estimate\tcount\tderivations\tyield'.format(total), file=out) for (i, group) in enumerate(groups, 1): print('{0}\t{1}\t{2}\t{3}'.format((float(group.count) / total), group.count, len(set(group.values)), group.key), file=out)
:param path: where to save :param groups: sorted list of sampled (obtained by group_by_projection)
grasp/io/results.py
save_mcmc_yields
wilkeraziz/grasp
9
python
def save_mcmc_yields(path, groups): '\n\n :param path: where to save\n :param groups: sorted list of sampled (obtained by group_by_projection)\n ' with smart_wopen(path) as out: total = sum((group.count for group in groups)) print('# MCMC samples={0}\n# estimate\tcount\tderivations\tyield'.format(total), file=out) for (i, group) in enumerate(groups, 1): print('{0}\t{1}\t{2}\t{3}'.format((float(group.count) / total), group.count, len(set(group.values)), group.key), file=out)
def save_mcmc_yields(path, groups): '\n\n :param path: where to save\n :param groups: sorted list of sampled (obtained by group_by_projection)\n ' with smart_wopen(path) as out: total = sum((group.count for group in groups)) print('# MCMC samples={0}\n# estimate\tcount\tderivations\tyield'.format(total), file=out) for (i, group) in enumerate(groups, 1): print('{0}\t{1}\t{2}\t{3}'.format((float(group.count) / total), group.count, len(set(group.values)), group.key), file=out)<|docstring|>:param path: where to save :param groups: sorted list of sampled (obtained by group_by_projection)<|endoftext|>
877998fd58c69d8c7cf3369161541f6899fbe2973ca4d77e8ceeca4119f6e816
def save_markov_chain(path, markov_chain, derivation2str, valuefunc=None, compfunc=None, flat=True): '\n\n :param path: where to save\n :param markov_chain: the original Markov chain\n :param valuefunc: an optional function over derivations that returns a score\n :param compfunc: an optional function over derivations that return feature components\n :param flat: whether the Markov chain is flat (each state represents a single derivation) or not,\n in which case each state is a sequence of derivations.\n ' if flat: with smart_wopen(path) as out: for d in markov_chain: fields = [] if (valuefunc is not None): fields.append(valuefunc(d)) fields.append(derivation2str(d)) if (compfunc is not None): fields.append(compfunc(d)) print('\t'.join((str(x) for x in fields)), file=out) else: with smart_wopen(path) as out: for (i, batch) in enumerate(markov_chain): for d in batch: fields = [i] if (valuefunc is not None): fields.append(valuefunc(d)) fields.append(derivation2str(d)) if (compfunc is not None): fields.append(compfunc(d)) print('\t'.join((str(x) for x in fields)), file=out)
:param path: where to save :param markov_chain: the original Markov chain :param valuefunc: an optional function over derivations that returns a score :param compfunc: an optional function over derivations that return feature components :param flat: whether the Markov chain is flat (each state represents a single derivation) or not, in which case each state is a sequence of derivations.
grasp/io/results.py
save_markov_chain
wilkeraziz/grasp
9
python
def save_markov_chain(path, markov_chain, derivation2str, valuefunc=None, compfunc=None, flat=True): '\n\n :param path: where to save\n :param markov_chain: the original Markov chain\n :param valuefunc: an optional function over derivations that returns a score\n :param compfunc: an optional function over derivations that return feature components\n :param flat: whether the Markov chain is flat (each state represents a single derivation) or not,\n in which case each state is a sequence of derivations.\n ' if flat: with smart_wopen(path) as out: for d in markov_chain: fields = [] if (valuefunc is not None): fields.append(valuefunc(d)) fields.append(derivation2str(d)) if (compfunc is not None): fields.append(compfunc(d)) print('\t'.join((str(x) for x in fields)), file=out) else: with smart_wopen(path) as out: for (i, batch) in enumerate(markov_chain): for d in batch: fields = [i] if (valuefunc is not None): fields.append(valuefunc(d)) fields.append(derivation2str(d)) if (compfunc is not None): fields.append(compfunc(d)) print('\t'.join((str(x) for x in fields)), file=out)
def save_markov_chain(path, markov_chain, derivation2str, valuefunc=None, compfunc=None, flat=True): '\n\n :param path: where to save\n :param markov_chain: the original Markov chain\n :param valuefunc: an optional function over derivations that returns a score\n :param compfunc: an optional function over derivations that return feature components\n :param flat: whether the Markov chain is flat (each state represents a single derivation) or not,\n in which case each state is a sequence of derivations.\n ' if flat: with smart_wopen(path) as out: for d in markov_chain: fields = [] if (valuefunc is not None): fields.append(valuefunc(d)) fields.append(derivation2str(d)) if (compfunc is not None): fields.append(compfunc(d)) print('\t'.join((str(x) for x in fields)), file=out) else: with smart_wopen(path) as out: for (i, batch) in enumerate(markov_chain): for d in batch: fields = [i] if (valuefunc is not None): fields.append(valuefunc(d)) fields.append(derivation2str(d)) if (compfunc is not None): fields.append(compfunc(d)) print('\t'.join((str(x) for x in fields)), file=out)<|docstring|>:param path: where to save :param markov_chain: the original Markov chain :param valuefunc: an optional function over derivations that returns a score :param compfunc: an optional function over derivations that return feature components :param flat: whether the Markov chain is flat (each state represents a single derivation) or not, in which case each state is a sequence of derivations.<|endoftext|>
60bd255f214c3aebe3763a4b77e5d08627dd589487f9086d754bcb4a6f3fa037
def fixup_old_nlocktimes(self): 'Fixup data from old format nlocktimes files\n\n Older nlocktimes files do not contain explicit prevout_signatures, prevout_scripts or\n prevout_script_types. Detect this and extract them from the raw transaction to make the\n txdata look consistent to the rest of the code. Note that segwit is not being handled\n here because old style nlocktimes predate segwit\n ' for txdata in self.txdata: if ('prevout_signatures' not in txdata): tx = txutil.from_hex(txdata['tx']) txdata['prevout_script_types'] = [] txdata['prevout_signatures'] = [] txdata['prevout_scripts'] = [] for i in range(wally.tx_get_num_inputs(tx)): inp = wally.tx_get_input_script(tx, i) ga_signature = b2h(inp[2:(inp[1] + 2)]) redeem_script = b2h(inp[(- 71):]) txdata['prevout_signatures'].append(ga_signature) txdata['prevout_scripts'].append(redeem_script) txdata['prevout_script_types'].append(gaconstants.P2SH_FORTIFIED_OUT)
Fixup data from old format nlocktimes files Older nlocktimes files do not contain explicit prevout_signatures, prevout_scripts or prevout_script_types. Detect this and extract them from the raw transaction to make the txdata look consistent to the rest of the code. Note that segwit is not being handled here because old style nlocktimes predate segwit
garecovery/two_of_two.py
fixup_old_nlocktimes
LeoComandini/garecovery
61
python
def fixup_old_nlocktimes(self): 'Fixup data from old format nlocktimes files\n\n Older nlocktimes files do not contain explicit prevout_signatures, prevout_scripts or\n prevout_script_types. Detect this and extract them from the raw transaction to make the\n txdata look consistent to the rest of the code. Note that segwit is not being handled\n here because old style nlocktimes predate segwit\n ' for txdata in self.txdata: if ('prevout_signatures' not in txdata): tx = txutil.from_hex(txdata['tx']) txdata['prevout_script_types'] = [] txdata['prevout_signatures'] = [] txdata['prevout_scripts'] = [] for i in range(wally.tx_get_num_inputs(tx)): inp = wally.tx_get_input_script(tx, i) ga_signature = b2h(inp[2:(inp[1] + 2)]) redeem_script = b2h(inp[(- 71):]) txdata['prevout_signatures'].append(ga_signature) txdata['prevout_scripts'].append(redeem_script) txdata['prevout_script_types'].append(gaconstants.P2SH_FORTIFIED_OUT)
def fixup_old_nlocktimes(self): 'Fixup data from old format nlocktimes files\n\n Older nlocktimes files do not contain explicit prevout_signatures, prevout_scripts or\n prevout_script_types. Detect this and extract them from the raw transaction to make the\n txdata look consistent to the rest of the code. Note that segwit is not being handled\n here because old style nlocktimes predate segwit\n ' for txdata in self.txdata: if ('prevout_signatures' not in txdata): tx = txutil.from_hex(txdata['tx']) txdata['prevout_script_types'] = [] txdata['prevout_signatures'] = [] txdata['prevout_scripts'] = [] for i in range(wally.tx_get_num_inputs(tx)): inp = wally.tx_get_input_script(tx, i) ga_signature = b2h(inp[2:(inp[1] + 2)]) redeem_script = b2h(inp[(- 71):]) txdata['prevout_signatures'].append(ga_signature) txdata['prevout_scripts'].append(redeem_script) txdata['prevout_script_types'].append(gaconstants.P2SH_FORTIFIED_OUT)<|docstring|>Fixup data from old format nlocktimes files Older nlocktimes files do not contain explicit prevout_signatures, prevout_scripts or prevout_script_types. Detect this and extract them from the raw transaction to make the txdata look consistent to the rest of the code. Note that segwit is not being handled here because old style nlocktimes predate segwit<|endoftext|>
2c00a268fae62f6b86cd71c643c6ab7b8c73fafa2bf4e8fa2e95f7f5ff5b3133
def infer_network(self): 'Return the network inferred from the GreenAddress xpub found in the redeem script\n\n This is determined by generating the sets of possible GreenAddress public keys for each\n network (testnet/mainnet) and then searching for them in the redeem script\n ' pointer = self.txdata[0]['prevout_pointers'][0] subaccount = self.txdata[0]['prevout_subaccounts'][0] def get_pubkey_for_pointer_hex(xpub): 'Return hex encoded public key derived from xpub for pointer' xpub = gacommon.derive_hd_key(xpub, [pointer], wally.BIP32_FLAG_KEY_PUBLIC) return b2h(wally.bip32_key_get_pub_key(xpub)) def get_pubkeys_hex(fn, keys_material, network): 'Return a list of hex-encoded public key given either a seed or a mnemonic' xpubs = fn(keys_material, subaccount, network) return [get_pubkey_for_pointer_hex(xpub) for xpub in xpubs] def get_pubkeys_for_network_hex(network): 'Return all the possible ga public keys (hex encoded) for the given network' pubkeys_hex = get_pubkeys_hex(ga_xpub.xpubs_from_seed, self.seed, network) if self.mnemonic: pubkeys_hex.extend(get_pubkeys_hex(ga_xpub.xpubs_from_mnemonic, self.mnemonic, network)) return pubkeys_hex mainnet_xpubs = get_pubkeys_for_network_hex('mainnet') testnet_xpubs = get_pubkeys_for_network_hex('testnet') redeem_script = self.txdata[0]['prevout_scripts'][0] if any(((xpub in redeem_script) for xpub in mainnet_xpubs)): return 'mainnet' if any(((xpub in redeem_script) for xpub in testnet_xpubs)): return 'testnet' logging.warn('Unable to detect network. Defaulting to mainnet. Consider passing the full mnemonic rather than hex seed') return 'mainnet'
Return the network inferred from the GreenAddress xpub found in the redeem script This is determined by generating the sets of possible GreenAddress public keys for each network (testnet/mainnet) and then searching for them in the redeem script
garecovery/two_of_two.py
infer_network
LeoComandini/garecovery
61
python
def infer_network(self): 'Return the network inferred from the GreenAddress xpub found in the redeem script\n\n This is determined by generating the sets of possible GreenAddress public keys for each\n network (testnet/mainnet) and then searching for them in the redeem script\n ' pointer = self.txdata[0]['prevout_pointers'][0] subaccount = self.txdata[0]['prevout_subaccounts'][0] def get_pubkey_for_pointer_hex(xpub): 'Return hex encoded public key derived from xpub for pointer' xpub = gacommon.derive_hd_key(xpub, [pointer], wally.BIP32_FLAG_KEY_PUBLIC) return b2h(wally.bip32_key_get_pub_key(xpub)) def get_pubkeys_hex(fn, keys_material, network): 'Return a list of hex-encoded public key given either a seed or a mnemonic' xpubs = fn(keys_material, subaccount, network) return [get_pubkey_for_pointer_hex(xpub) for xpub in xpubs] def get_pubkeys_for_network_hex(network): 'Return all the possible ga public keys (hex encoded) for the given network' pubkeys_hex = get_pubkeys_hex(ga_xpub.xpubs_from_seed, self.seed, network) if self.mnemonic: pubkeys_hex.extend(get_pubkeys_hex(ga_xpub.xpubs_from_mnemonic, self.mnemonic, network)) return pubkeys_hex mainnet_xpubs = get_pubkeys_for_network_hex('mainnet') testnet_xpubs = get_pubkeys_for_network_hex('testnet') redeem_script = self.txdata[0]['prevout_scripts'][0] if any(((xpub in redeem_script) for xpub in mainnet_xpubs)): return 'mainnet' if any(((xpub in redeem_script) for xpub in testnet_xpubs)): return 'testnet' logging.warn('Unable to detect network. Defaulting to mainnet. Consider passing the full mnemonic rather than hex seed') return 'mainnet'
def infer_network(self): 'Return the network inferred from the GreenAddress xpub found in the redeem script\n\n This is determined by generating the sets of possible GreenAddress public keys for each\n network (testnet/mainnet) and then searching for them in the redeem script\n ' pointer = self.txdata[0]['prevout_pointers'][0] subaccount = self.txdata[0]['prevout_subaccounts'][0] def get_pubkey_for_pointer_hex(xpub): 'Return hex encoded public key derived from xpub for pointer' xpub = gacommon.derive_hd_key(xpub, [pointer], wally.BIP32_FLAG_KEY_PUBLIC) return b2h(wally.bip32_key_get_pub_key(xpub)) def get_pubkeys_hex(fn, keys_material, network): 'Return a list of hex-encoded public key given either a seed or a mnemonic' xpubs = fn(keys_material, subaccount, network) return [get_pubkey_for_pointer_hex(xpub) for xpub in xpubs] def get_pubkeys_for_network_hex(network): 'Return all the possible ga public keys (hex encoded) for the given network' pubkeys_hex = get_pubkeys_hex(ga_xpub.xpubs_from_seed, self.seed, network) if self.mnemonic: pubkeys_hex.extend(get_pubkeys_hex(ga_xpub.xpubs_from_mnemonic, self.mnemonic, network)) return pubkeys_hex mainnet_xpubs = get_pubkeys_for_network_hex('mainnet') testnet_xpubs = get_pubkeys_for_network_hex('testnet') redeem_script = self.txdata[0]['prevout_scripts'][0] if any(((xpub in redeem_script) for xpub in mainnet_xpubs)): return 'mainnet' if any(((xpub in redeem_script) for xpub in testnet_xpubs)): return 'testnet' logging.warn('Unable to detect network. Defaulting to mainnet. Consider passing the full mnemonic rather than hex seed') return 'mainnet'<|docstring|>Return the network inferred from the GreenAddress xpub found in the redeem script This is determined by generating the sets of possible GreenAddress public keys for each network (testnet/mainnet) and then searching for them in the redeem script<|endoftext|>
3d220b3261e4a83767baaac98aa8a2d9513c4b205df426f3ecb0a645647a3ac1
def get_pubkey_for_pointer_hex(xpub): 'Return hex encoded public key derived from xpub for pointer' xpub = gacommon.derive_hd_key(xpub, [pointer], wally.BIP32_FLAG_KEY_PUBLIC) return b2h(wally.bip32_key_get_pub_key(xpub))
Return hex encoded public key derived from xpub for pointer
garecovery/two_of_two.py
get_pubkey_for_pointer_hex
LeoComandini/garecovery
61
python
def get_pubkey_for_pointer_hex(xpub): xpub = gacommon.derive_hd_key(xpub, [pointer], wally.BIP32_FLAG_KEY_PUBLIC) return b2h(wally.bip32_key_get_pub_key(xpub))
def get_pubkey_for_pointer_hex(xpub): xpub = gacommon.derive_hd_key(xpub, [pointer], wally.BIP32_FLAG_KEY_PUBLIC) return b2h(wally.bip32_key_get_pub_key(xpub))<|docstring|>Return hex encoded public key derived from xpub for pointer<|endoftext|>
3391131c7aa6560e3a25729375bec1bf02f6f6f449317df12333988d76444cff
def get_pubkeys_hex(fn, keys_material, network): 'Return a list of hex-encoded public key given either a seed or a mnemonic' xpubs = fn(keys_material, subaccount, network) return [get_pubkey_for_pointer_hex(xpub) for xpub in xpubs]
Return a list of hex-encoded public key given either a seed or a mnemonic
garecovery/two_of_two.py
get_pubkeys_hex
LeoComandini/garecovery
61
python
def get_pubkeys_hex(fn, keys_material, network): xpubs = fn(keys_material, subaccount, network) return [get_pubkey_for_pointer_hex(xpub) for xpub in xpubs]
def get_pubkeys_hex(fn, keys_material, network): xpubs = fn(keys_material, subaccount, network) return [get_pubkey_for_pointer_hex(xpub) for xpub in xpubs]<|docstring|>Return a list of hex-encoded public key given either a seed or a mnemonic<|endoftext|>
1210620c7b147bef2da96ba2c23f6791862712e0b3e78b9b4f14f6603f863d1a
def get_pubkeys_for_network_hex(network): 'Return all the possible ga public keys (hex encoded) for the given network' pubkeys_hex = get_pubkeys_hex(ga_xpub.xpubs_from_seed, self.seed, network) if self.mnemonic: pubkeys_hex.extend(get_pubkeys_hex(ga_xpub.xpubs_from_mnemonic, self.mnemonic, network)) return pubkeys_hex
Return all the possible ga public keys (hex encoded) for the given network
garecovery/two_of_two.py
get_pubkeys_for_network_hex
LeoComandini/garecovery
61
python
def get_pubkeys_for_network_hex(network): pubkeys_hex = get_pubkeys_hex(ga_xpub.xpubs_from_seed, self.seed, network) if self.mnemonic: pubkeys_hex.extend(get_pubkeys_hex(ga_xpub.xpubs_from_mnemonic, self.mnemonic, network)) return pubkeys_hex
def get_pubkeys_for_network_hex(network): pubkeys_hex = get_pubkeys_hex(ga_xpub.xpubs_from_seed, self.seed, network) if self.mnemonic: pubkeys_hex.extend(get_pubkeys_hex(ga_xpub.xpubs_from_mnemonic, self.mnemonic, network)) return pubkeys_hex<|docstring|>Return all the possible ga public keys (hex encoded) for the given network<|endoftext|>
6792e849d667673b1e466c53045defbf8ffc54a68724bbaa8964e214b4569eaf
def argsparselist(txt): '\n    Validate a list of txt argument.\n\n    :param txt: argument with comma separated strings.\n    :return: list of strings.\n    ' txt = txt.split(',') listarg = [i.strip() for i in txt] return listarg
Validate a list of txt argument. :param txt: argument with comma separated strings. :return: list of strings.
cpufreq/run.py
argsparselist
jok4r/cpufreq
3
python
def argsparselist(txt): '\n    Validate a list of txt argument.\n\n    :param txt: argument with comma separated strings.\n    :return: list of strings.\n    ' txt = txt.split(',') listarg = [i.strip() for i in txt] return listarg
def argsparselist(txt): '\n    Validate a list of txt argument.\n\n    :param txt: argument with comma separated strings.\n    :return: list of strings.\n    ' txt = txt.split(',') listarg = [i.strip() for i in txt] return listarg<|docstring|>Validate a list of txt argument. :param txt: argument with comma separated strings. :return: list of strings.<|endoftext|>
a65900833cc59c5ea713b59b7ee3c4f60f8d28d329195183d2a53360cceeedb8
def argsparseintlist(txt): '\n Validate a list of int arguments.\n\n :param txt: argument with comma separated numbers.\n :return: list of integer converted numbers.\n ' txt = txt.split(',') listarg = [int(i) for i in txt] return listarg
Validate a list of int arguments. :param txt: argument with comma separated numbers. :return: list of integer converted numbers.
cpufreq/run.py
argsparseintlist
jok4r/cpufreq
3
python
def argsparseintlist(txt): '\n Validate a list of int arguments.\n\n :param txt: argument with comma separated numbers.\n :return: list of integer converted numbers.\n ' txt = txt.split(',') listarg = [int(i) for i in txt] return listarg
def argsparseintlist(txt): '\n Validate a list of int arguments.\n\n :param txt: argument with comma separated numbers.\n :return: list of integer converted numbers.\n ' txt = txt.split(',') listarg = [int(i) for i in txt] return listarg<|docstring|>Validate a list of int arguments. :param txt: argument with comma separated numbers. :return: list of integer converted numbers.<|endoftext|>
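A brief usage sketch of the two comma-list parsers in the records above (illustrative only, not part of either record): argsparseintlist converts each comma-separated token to an int, while argsparselist keeps the tokens as stripped strings.

assert argsparseintlist('0,1,3,5') == [0, 1, 3, 5]
assert argsparselist('ondemand, performance') == ['ondemand', 'performance']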
012098524dcfd21689ed9cfc9855079e5473808a916b4999c17f6f5734682d73
def argsparsevalidation(avail_govs): '\n    Validation of script arguments passed via console.\n\n    :return: argparse object with validated arguments.\n    ' parser = argparse.ArgumentParser(description='Script to get and set frequencies configurations of cpus by command line') p_group = parser.add_mutually_exclusive_group() p_group.add_argument('--info', action='store_true', help='Print status of governors and frequencies') p_group.add_argument('--reset', action='store_true', help='Reset the governors and max and min frequencies') subparsers = parser.add_subparsers(help='Available commands') parse_setgovernor = subparsers.add_parser('setgovernor', help='Set the governor for all online cpus or with optional specific cpus. Ex: cpufreq setgovernor "ondemand"') parse_setgovernor.add_argument('governor', help='Choose the governor name to set', choices=avail_govs) p_setgovernor_group = parse_setgovernor.add_mutually_exclusive_group() p_setgovernor_group.add_argument('--all', action='store_true', help='Set the governor for all online cpus.') p_setgovernor_group.add_argument('--cpus', type=argsparseintlist, help='List of CPU numbers (first=0) to set governor Ex: 0,1,3,5') parse_setfrequency = subparsers.add_parser('setfrequency', help='Set the frequency for all online cpus or with optional specific cpus. Ex: cpufreq setfrequency 2100000') parse_setfrequency.add_argument('frequency', help='Frequency value to set', type=int) p_setfrequency_group = parse_setfrequency.add_mutually_exclusive_group() p_setfrequency_group.add_argument('--all', action='store_true', help='Set the frequency for all online cpus.') p_setfrequency_group.add_argument('--cpus', type=argsparseintlist, help='List of CPU numbers (first=0) to set frequency Ex: 0,1,3,5') args = parser.parse_args() return args
Validation of script arguments passed via console. :return: argparse object with validated arguments.
cpufreq/run.py
argsparsevalidation
jok4r/cpufreq
3
python
def argsparsevalidation(avail_govs): '\n    Validation of script arguments passed via console.\n\n    :return: argparse object with validated arguments.\n    ' parser = argparse.ArgumentParser(description='Script to get and set frequencies configurations of cpus by command line') p_group = parser.add_mutually_exclusive_group() p_group.add_argument('--info', action='store_true', help='Print status of governors and frequencies') p_group.add_argument('--reset', action='store_true', help='Reset the governors and max and min frequencies') subparsers = parser.add_subparsers(help='Available commands') parse_setgovernor = subparsers.add_parser('setgovernor', help='Set the governor for all online cpus or with optional specific cpus. Ex: cpufreq setgovernor "ondemand"') parse_setgovernor.add_argument('governor', help='Choose the governor name to set', choices=avail_govs) p_setgovernor_group = parse_setgovernor.add_mutually_exclusive_group() p_setgovernor_group.add_argument('--all', action='store_true', help='Set the governor for all online cpus.') p_setgovernor_group.add_argument('--cpus', type=argsparseintlist, help='List of CPU numbers (first=0) to set governor Ex: 0,1,3,5') parse_setfrequency = subparsers.add_parser('setfrequency', help='Set the frequency for all online cpus or with optional specific cpus. Ex: cpufreq setfrequency 2100000') parse_setfrequency.add_argument('frequency', help='Frequency value to set', type=int) p_setfrequency_group = parse_setfrequency.add_mutually_exclusive_group() p_setfrequency_group.add_argument('--all', action='store_true', help='Set the frequency for all online cpus.') p_setfrequency_group.add_argument('--cpus', type=argsparseintlist, help='List of CPU numbers (first=0) to set frequency Ex: 0,1,3,5') args = parser.parse_args() return args
def argsparsevalidation(avail_govs): '\n    Validation of script arguments passed via console.\n\n    :return: argparse object with validated arguments.\n    ' parser = argparse.ArgumentParser(description='Script to get and set frequencies configurations of cpus by command line') p_group = parser.add_mutually_exclusive_group() p_group.add_argument('--info', action='store_true', help='Print status of governors and frequencies') p_group.add_argument('--reset', action='store_true', help='Reset the governors and max and min frequencies') subparsers = parser.add_subparsers(help='Available commands') parse_setgovernor = subparsers.add_parser('setgovernor', help='Set the governor for all online cpus or with optional specific cpus. Ex: cpufreq setgovernor "ondemand"') parse_setgovernor.add_argument('governor', help='Choose the governor name to set', choices=avail_govs) p_setgovernor_group = parse_setgovernor.add_mutually_exclusive_group() p_setgovernor_group.add_argument('--all', action='store_true', help='Set the governor for all online cpus.') p_setgovernor_group.add_argument('--cpus', type=argsparseintlist, help='List of CPU numbers (first=0) to set governor Ex: 0,1,3,5') parse_setfrequency = subparsers.add_parser('setfrequency', help='Set the frequency for all online cpus or with optional specific cpus. Ex: cpufreq setfrequency 2100000') parse_setfrequency.add_argument('frequency', help='Frequency value to set', type=int) p_setfrequency_group = parse_setfrequency.add_mutually_exclusive_group() p_setfrequency_group.add_argument('--all', action='store_true', help='Set the frequency for all online cpus.') p_setfrequency_group.add_argument('--cpus', type=argsparseintlist, help='List of CPU numbers (first=0) to set frequency Ex: 0,1,3,5') args = parser.parse_args() return args<|docstring|>Validation of script arguments passed via console. :return: argparse object with validated arguments.<|endoftext|>
2949e818a81cd7db509c1855874ad55dd2261e0f3521c9ac144627eb1420c667
def main(): '\n    Main function executed from console run.\n    ' try: c = cpuFreq() except CPUFreqErrorInit as err: print('{}'.format(err)) exit() args = argsparsevalidation(c.available_governors) if (args.info is True): info(c) elif (args.reset is True): c.reset() print('Governors, maximum and minimum frequencies reset successfully.') elif hasattr(args, 'governor'): if (args.all == True): rg = None else: avail_cpus = c.get_online_cpus() if (not set(args.cpus).issubset(set(avail_cpus))): print('ERROR: cpu list has cpu number(s) that are not in the online cpus list.') exit(1) rg = args.cpus c.set_governors(gov=args.governor, rg=rg) print('Governor set successfully to cpus.') elif hasattr(args, 'frequency'): if (not (args.frequency in c.available_frequencies)): print('ERROR: frequency should be a value in the list of available frequencies: ') print(' ', c.available_frequencies) exit(1) if (args.all == True): rg = None else: avail_cpus = c.get_online_cpus() if (not set(args.cpus).issubset(set(avail_cpus))): print('ERROR: cpu list has cpu number(s) that are not in the online cpus list.') exit(1) rg = args.cpus c.set_frequencies(freq=args.frequency, rg=rg) print('Frequency set successfully to cpus.')
Main function executed from console run.
cpufreq/run.py
main
jok4r/cpufreq
3
python
def main(): '\n    \n    ' try: c = cpuFreq() except CPUFreqErrorInit as err: print('{}'.format(err)) exit() args = argsparsevalidation(c.available_governors) if (args.info is True): info(c) elif (args.reset is True): c.reset() print('Governors, maximum and minimum frequencies reset successfully.') elif hasattr(args, 'governor'): if (args.all == True): rg = None else: avail_cpus = c.get_online_cpus() if (not set(args.cpus).issubset(set(avail_cpus))): print('ERROR: cpu list has cpu number(s) that are not in the online cpus list.') exit(1) rg = args.cpus c.set_governors(gov=args.governor, rg=rg) print('Governor set successfully to cpus.') elif hasattr(args, 'frequency'): if (not (args.frequency in c.available_frequencies)): print('ERROR: frequency should be a value in the list of available frequencies: ') print(' ', c.available_frequencies) exit(1) if (args.all == True): rg = None else: avail_cpus = c.get_online_cpus() if (not set(args.cpus).issubset(set(avail_cpus))): print('ERROR: cpu list has cpu number(s) that are not in the online cpus list.') exit(1) rg = args.cpus c.set_frequencies(freq=args.frequency, rg=rg) print('Frequency set successfully to cpus.')
def main(): '\n    \n    ' try: c = cpuFreq() except CPUFreqErrorInit as err: print('{}'.format(err)) exit() args = argsparsevalidation(c.available_governors) if (args.info is True): info(c) elif (args.reset is True): c.reset() print('Governors, maximum and minimum frequencies reset successfully.') elif hasattr(args, 'governor'): if (args.all == True): rg = None else: avail_cpus = c.get_online_cpus() if (not set(args.cpus).issubset(set(avail_cpus))): print('ERROR: cpu list has cpu number(s) that are not in the online cpus list.') exit(1) rg = args.cpus c.set_governors(gov=args.governor, rg=rg) print('Governor set successfully to cpus.') elif hasattr(args, 'frequency'): if (not (args.frequency in c.available_frequencies)): print('ERROR: frequency should be a value in the list of available frequencies: ') print(' ', c.available_frequencies) exit(1) if (args.all == True): rg = None else: avail_cpus = c.get_online_cpus() if (not set(args.cpus).issubset(set(avail_cpus))): print('ERROR: cpu list has cpu number(s) that are not in the online cpus list.') exit(1) rg = args.cpus c.set_frequencies(freq=args.frequency, rg=rg) print('Frequency set successfully to cpus.')<|docstring|>Main function executed from console run.<|endoftext|>
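For context, a minimal sketch of driving the same cpuFreq API directly, without the CLI wrapper in the main() record above. The import path is an assumption based on the repository layout (cpufreq/run.py in jok4r/cpufreq); only attributes and methods already used in main() are called.

from cpufreq import cpuFreq, CPUFreqErrorInit  # assumed import path

try:
    c = cpuFreq()
except CPUFreqErrorInit as err:
    raise SystemExit(err)

print(c.available_governors, c.available_frequencies)
c.set_governors(gov='ondemand', rg=None)        # rg=None targets all online cpus, as in main()
c.set_frequencies(freq=2100000, rg=[0, 1])      # restrict to cpus 0 and 1
c.reset()                                       # restore governors and min/max frequencies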
99520ef8bec6d03f3d5abd45c7f5c1cf2f3e89c0c63ebb02b6f7d9c2eedad351
def detect_language(text, languages): 'Returns the detected language of given text.' pass
Returns the detected language of given text.
language_detector/main.py
detect_language
ine-rmotr-projects/pyp-w1-gw-language-detector
10
python
def detect_language(text, languages): pass
def detect_language(text, languages): pass<|docstring|>Returns the detected language of given text.<|endoftext|>
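The detect_language record above is only a stub (its body is pass). One possible implementation sketch, assuming languages is a list of dicts with 'name' and 'common_words' keys; that shape is an assumption and is not shown in the record.

def detect_language(text, languages):
    # Pick the language whose common-word list overlaps most with the text.
    words = set(text.lower().split())
    best = max(languages, key=lambda lang: len(words & set(lang['common_words'])))
    return best['name']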
28f91b5d2c44d81f3895b31bf3811b1a296143860fb7d508994a4a14bb373df9
@torch.no_grad() def create_sintel_submission(model, iters=32, warm_start=False, output_path='sintel_submission'): ' Create submission for the Sintel leaderboard ' model.eval() for dstype in ['clean', 'final']: test_dataset = datasets.MpiSintel(split='test', aug_params=None, dstype=dstype) (flow_prev, sequence_prev) = (None, None) for test_id in range(len(test_dataset)): (image1, image2, (sequence, frame)) = test_dataset[test_id] if (sequence != sequence_prev): flow_prev = None padder = InputPadder(image1.shape) (image1, image2) = padder.pad(image1[None].cuda(), image2[None].cuda()) (flow_low, flow_pr) = model(image1, image2, iters=iters, flow_init=flow_prev, test_mode=True) flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy() if warm_start: flow_prev = forward_interpolate(flow_low[0])[None].cuda() output_dir = os.path.join(output_path, dstype, sequence) output_file = os.path.join(output_dir, ('frame%04d.flo' % (frame + 1))) if (not os.path.exists(output_dir)): os.makedirs(output_dir) frame_utils.writeFlow(output_file, flow) sequence_prev = sequence
Create submission for the Sintel leaderboard
evaluate.py
create_sintel_submission
Guominyingxiongququ/RAFT
0
python
@torch.no_grad() def create_sintel_submission(model, iters=32, warm_start=False, output_path='sintel_submission'): ' ' model.eval() for dstype in ['clean', 'final']: test_dataset = datasets.MpiSintel(split='test', aug_params=None, dstype=dstype) (flow_prev, sequence_prev) = (None, None) for test_id in range(len(test_dataset)): (image1, image2, (sequence, frame)) = test_dataset[test_id] if (sequence != sequence_prev): flow_prev = None padder = InputPadder(image1.shape) (image1, image2) = padder.pad(image1[None].cuda(), image2[None].cuda()) (flow_low, flow_pr) = model(image1, image2, iters=iters, flow_init=flow_prev, test_mode=True) flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy() if warm_start: flow_prev = forward_interpolate(flow_low[0])[None].cuda() output_dir = os.path.join(output_path, dstype, sequence) output_file = os.path.join(output_dir, ('frame%04d.flo' % (frame + 1))) if (not os.path.exists(output_dir)): os.makedirs(output_dir) frame_utils.writeFlow(output_file, flow) sequence_prev = sequence
@torch.no_grad() def create_sintel_submission(model, iters=32, warm_start=False, output_path='sintel_submission'): ' ' model.eval() for dstype in ['clean', 'final']: test_dataset = datasets.MpiSintel(split='test', aug_params=None, dstype=dstype) (flow_prev, sequence_prev) = (None, None) for test_id in range(len(test_dataset)): (image1, image2, (sequence, frame)) = test_dataset[test_id] if (sequence != sequence_prev): flow_prev = None padder = InputPadder(image1.shape) (image1, image2) = padder.pad(image1[None].cuda(), image2[None].cuda()) (flow_low, flow_pr) = model(image1, image2, iters=iters, flow_init=flow_prev, test_mode=True) flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy() if warm_start: flow_prev = forward_interpolate(flow_low[0])[None].cuda() output_dir = os.path.join(output_path, dstype, sequence) output_file = os.path.join(output_dir, ('frame%04d.flo' % (frame + 1))) if (not os.path.exists(output_dir)): os.makedirs(output_dir) frame_utils.writeFlow(output_file, flow) sequence_prev = sequence<|docstring|>Create submission for the Sintel leaderboard<|endoftext|>
7b7f821f7e94738df584a9c7a9f28235f90d6873dd0d2f669a0c38760dffd2af
@torch.no_grad() def create_kitti_submission(model, iters=24, output_path='kitti_submission'): ' Create submission for the KITTI leaderboard ' model.eval() test_dataset = datasets.KITTI(split='testing', aug_params=None) if (not os.path.exists(output_path)): os.makedirs(output_path) for test_id in range(len(test_dataset)): (image1, image2, (frame_id,)) = test_dataset[test_id] padder = InputPadder(image1.shape, mode='kitti') (image1, image2) = padder.pad(image1[None].cuda(), image2[None].cuda()) (_, flow_pr) = model(image1, image2, iters=iters, test_mode=True) flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy() output_filename = os.path.join(output_path, frame_id) frame_utils.writeFlowKITTI(output_filename, flow)
Create submission for the KITTI leaderboard
evaluate.py
create_kitti_submission
Guominyingxiongququ/RAFT
0
python
@torch.no_grad() def create_kitti_submission(model, iters=24, output_path='kitti_submission'): ' ' model.eval() test_dataset = datasets.KITTI(split='testing', aug_params=None) if (not os.path.exists(output_path)): os.makedirs(output_path) for test_id in range(len(test_dataset)): (image1, image2, (frame_id,)) = test_dataset[test_id] padder = InputPadder(image1.shape, mode='kitti') (image1, image2) = padder.pad(image1[None].cuda(), image2[None].cuda()) (_, flow_pr) = model(image1, image2, iters=iters, test_mode=True) flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy() output_filename = os.path.join(output_path, frame_id) frame_utils.writeFlowKITTI(output_filename, flow)
@torch.no_grad() def create_kitti_submission(model, iters=24, output_path='kitti_submission'): ' ' model.eval() test_dataset = datasets.KITTI(split='testing', aug_params=None) if (not os.path.exists(output_path)): os.makedirs(output_path) for test_id in range(len(test_dataset)): (image1, image2, (frame_id,)) = test_dataset[test_id] padder = InputPadder(image1.shape, mode='kitti') (image1, image2) = padder.pad(image1[None].cuda(), image2[None].cuda()) (_, flow_pr) = model(image1, image2, iters=iters, test_mode=True) flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy() output_filename = os.path.join(output_path, frame_id) frame_utils.writeFlowKITTI(output_filename, flow)<|docstring|>Create submission for the KITTI leaderboard<|endoftext|>
62cfccd38607797bdbce8954777c6130b59e7a6c74e25b92754bbe4c2d4e0ce0
@torch.no_grad() def validate_chairs(model, iters=24): ' Perform evaluation on the FlyingChairs (test) split ' model.eval() epe_list = [] val_dataset = datasets.FlyingChairs(split='validation') for val_id in range(len(val_dataset)): (image1, image2, flow_gt, _) = val_dataset[val_id] image1 = image1[None].cuda() image2 = image2[None].cuda() (_, flow_pr) = model(image1, image2, iters=iters, test_mode=True) epe = torch.sum(((flow_pr[0].cpu() - flow_gt) ** 2), dim=0).sqrt() epe_list.append(epe.view((- 1)).numpy()) epe = np.mean(np.concatenate(epe_list)) print(('Validation Chairs EPE: %f' % epe)) return {'chairs': epe}
Perform evaluation on the FlyingChairs (test) split
evaluate.py
validate_chairs
Guominyingxiongququ/RAFT
0
python
@torch.no_grad() def validate_chairs(model, iters=24): ' ' model.eval() epe_list = [] val_dataset = datasets.FlyingChairs(split='validation') for val_id in range(len(val_dataset)): (image1, image2, flow_gt, _) = val_dataset[val_id] image1 = image1[None].cuda() image2 = image2[None].cuda() (_, flow_pr) = model(image1, image2, iters=iters, test_mode=True) epe = torch.sum(((flow_pr[0].cpu() - flow_gt) ** 2), dim=0).sqrt() epe_list.append(epe.view((- 1)).numpy()) epe = np.mean(np.concatenate(epe_list)) print(('Validation Chairs EPE: %f' % epe)) return {'chairs': epe}
@torch.no_grad() def validate_chairs(model, iters=24): ' ' model.eval() epe_list = [] val_dataset = datasets.FlyingChairs(split='validation') for val_id in range(len(val_dataset)): (image1, image2, flow_gt, _) = val_dataset[val_id] image1 = image1[None].cuda() image2 = image2[None].cuda() (_, flow_pr) = model(image1, image2, iters=iters, test_mode=True) epe = torch.sum(((flow_pr[0].cpu() - flow_gt) ** 2), dim=0).sqrt() epe_list.append(epe.view((- 1)).numpy()) epe = np.mean(np.concatenate(epe_list)) print(('Validation Chairs EPE: %f' % epe)) return {'chairs': epe}<|docstring|>Perform evaluation on the FlyingChairs (test) split<|endoftext|>
b74e0df284ed37f7454aa75374e47381f6790c372279f27b1e3e7a0cf4a93cfb
@torch.no_grad() def validate_sintel(model, iters=32): ' Perform validation using the Sintel (train) split ' model.eval() results = {} for dstype in ['clean', 'final']: val_dataset = datasets.MpiSintel(split='training', dstype=dstype) epe_list = [] for val_id in range(len(val_dataset)): (image1, image2, flow_gt, _) = val_dataset[val_id] image1 = image1[None].cuda() image2 = image2[None].cuda() padder = InputPadder(image1.shape) (image1, image2) = padder.pad(image1, image2) (flow_low, flow_pr) = model(image1, image2, iters=iters, test_mode=True) flow = padder.unpad(flow_pr[0]).cpu() epe = torch.sum(((flow - flow_gt) ** 2), dim=0).sqrt() epe_list.append(epe.view((- 1)).numpy()) epe_all = np.concatenate(epe_list) epe = np.mean(epe_all) px1 = np.mean((epe_all < 1)) px3 = np.mean((epe_all < 3)) px5 = np.mean((epe_all < 5)) print(('Validation (%s) EPE: %f, 1px: %f, 3px: %f, 5px: %f' % (dstype, epe, px1, px3, px5))) results[dstype] = np.mean(epe_list) return results
Perform validation using the Sintel (train) split
evaluate.py
validate_sintel
Guominyingxiongququ/RAFT
0
python
@torch.no_grad() def validate_sintel(model, iters=32): ' ' model.eval() results = {} for dstype in ['clean', 'final']: val_dataset = datasets.MpiSintel(split='training', dstype=dstype) epe_list = [] for val_id in range(len(val_dataset)): (image1, image2, flow_gt, _) = val_dataset[val_id] image1 = image1[None].cuda() image2 = image2[None].cuda() padder = InputPadder(image1.shape) (image1, image2) = padder.pad(image1, image2) (flow_low, flow_pr) = model(image1, image2, iters=iters, test_mode=True) flow = padder.unpad(flow_pr[0]).cpu() epe = torch.sum(((flow - flow_gt) ** 2), dim=0).sqrt() epe_list.append(epe.view((- 1)).numpy()) epe_all = np.concatenate(epe_list) epe = np.mean(epe_all) px1 = np.mean((epe_all < 1)) px3 = np.mean((epe_all < 3)) px5 = np.mean((epe_all < 5)) print(('Validation (%s) EPE: %f, 1px: %f, 3px: %f, 5px: %f' % (dstype, epe, px1, px3, px5))) results[dstype] = np.mean(epe_list) return results
@torch.no_grad() def validate_sintel(model, iters=32): ' ' model.eval() results = {} for dstype in ['clean', 'final']: val_dataset = datasets.MpiSintel(split='training', dstype=dstype) epe_list = [] for val_id in range(len(val_dataset)): (image1, image2, flow_gt, _) = val_dataset[val_id] image1 = image1[None].cuda() image2 = image2[None].cuda() padder = InputPadder(image1.shape) (image1, image2) = padder.pad(image1, image2) (flow_low, flow_pr) = model(image1, image2, iters=iters, test_mode=True) flow = padder.unpad(flow_pr[0]).cpu() epe = torch.sum(((flow - flow_gt) ** 2), dim=0).sqrt() epe_list.append(epe.view((- 1)).numpy()) epe_all = np.concatenate(epe_list) epe = np.mean(epe_all) px1 = np.mean((epe_all < 1)) px3 = np.mean((epe_all < 3)) px5 = np.mean((epe_all < 5)) print(('Validation (%s) EPE: %f, 1px: %f, 3px: %f, 5px: %f' % (dstype, epe, px1, px3, px5))) results[dstype] = np.mean(epe_list) return results<|docstring|>Perform validation using the Sintel (train) split<|endoftext|>
894826271d926d63fb6b1428eb67ef062475e4e5fe559f6ba6b5f357031c3dea
@torch.no_grad() def validate_kitti(model, iters=24): ' Perform validation using the KITTI-2015 (train) split ' model.eval() val_dataset = datasets.KITTI(split='training') (out_list, epe_list) = ([], []) for val_id in range(len(val_dataset)): (image1, image2, flow_gt, valid_gt) = val_dataset[val_id] image1 = image1[None].cuda() image2 = image2[None].cuda() padder = InputPadder(image1.shape, mode='kitti') (image1, image2) = padder.pad(image1, image2) (flow_low, flow_pr) = model(image1, image2, iters=iters, test_mode=True) flow = padder.unpad(flow_pr[0]).cpu() epe = torch.sum(((flow - flow_gt) ** 2), dim=0).sqrt() mag = torch.sum((flow_gt ** 2), dim=0).sqrt() epe = epe.view((- 1)) mag = mag.view((- 1)) val = (valid_gt.view((- 1)) >= 0.5) out = ((epe > 3.0) & ((epe / mag) > 0.05)).float() epe_list.append(epe[val].mean().item()) out_list.append(out[val].cpu().numpy()) epe_list = np.array(epe_list) out_list = np.concatenate(out_list) epe = np.mean(epe_list) f1 = (100 * np.mean(out_list)) print(('Validation KITTI: %f, %f' % (epe, f1))) return {'kitti-epe': epe, 'kitti-f1': f1}
Perform validation using the KITTI-2015 (train) split
evaluate.py
validate_kitti
Guominyingxiongququ/RAFT
0
python
@torch.no_grad() def validate_kitti(model, iters=24): ' ' model.eval() val_dataset = datasets.KITTI(split='training') (out_list, epe_list) = ([], []) for val_id in range(len(val_dataset)): (image1, image2, flow_gt, valid_gt) = val_dataset[val_id] image1 = image1[None].cuda() image2 = image2[None].cuda() padder = InputPadder(image1.shape, mode='kitti') (image1, image2) = padder.pad(image1, image2) (flow_low, flow_pr) = model(image1, image2, iters=iters, test_mode=True) flow = padder.unpad(flow_pr[0]).cpu() epe = torch.sum(((flow - flow_gt) ** 2), dim=0).sqrt() mag = torch.sum((flow_gt ** 2), dim=0).sqrt() epe = epe.view((- 1)) mag = mag.view((- 1)) val = (valid_gt.view((- 1)) >= 0.5) out = ((epe > 3.0) & ((epe / mag) > 0.05)).float() epe_list.append(epe[val].mean().item()) out_list.append(out[val].cpu().numpy()) epe_list = np.array(epe_list) out_list = np.concatenate(out_list) epe = np.mean(epe_list) f1 = (100 * np.mean(out_list)) print(('Validation KITTI: %f, %f' % (epe, f1))) return {'kitti-epe': epe, 'kitti-f1': f1}
@torch.no_grad() def validate_kitti(model, iters=24): ' ' model.eval() val_dataset = datasets.KITTI(split='training') (out_list, epe_list) = ([], []) for val_id in range(len(val_dataset)): (image1, image2, flow_gt, valid_gt) = val_dataset[val_id] image1 = image1[None].cuda() image2 = image2[None].cuda() padder = InputPadder(image1.shape, mode='kitti') (image1, image2) = padder.pad(image1, image2) (flow_low, flow_pr) = model(image1, image2, iters=iters, test_mode=True) flow = padder.unpad(flow_pr[0]).cpu() epe = torch.sum(((flow - flow_gt) ** 2), dim=0).sqrt() mag = torch.sum((flow_gt ** 2), dim=0).sqrt() epe = epe.view((- 1)) mag = mag.view((- 1)) val = (valid_gt.view((- 1)) >= 0.5) out = ((epe > 3.0) & ((epe / mag) > 0.05)).float() epe_list.append(epe[val].mean().item()) out_list.append(out[val].cpu().numpy()) epe_list = np.array(epe_list) out_list = np.concatenate(out_list) epe = np.mean(epe_list) f1 = (100 * np.mean(out_list)) print(('Validation KITTI: %f, %f' % (epe, f1))) return {'kitti-epe': epe, 'kitti-f1': f1}<|docstring|>Perform validation using the KITTI-2015 (train) split<|endoftext|>
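For reference, the outlier test computed in validate_kitti above is the standard KITTI-2015 Fl criterion. Writing the per-pixel end-point error as $\mathrm{EPE} = \lVert \mathbf{f} - \mathbf{f}_{gt} \rVert_2$, a valid pixel counts as an outlier when

$$\mathrm{EPE} > 3\ \text{px} \quad\text{and}\quad \frac{\mathrm{EPE}}{\lVert \mathbf{f}_{gt} \rVert_2} > 0.05,$$

and the reported f1 value is the percentage of such outliers over the valid pixels.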
176900fec397ad58cb981518e09d77557aedf8c1c9c31090cfd0bcb9c7001e47
def create_spark_session(): '\n Create Spark session\n ' spark = SparkSession.builder.config('spark.jars.packages', 'org.apache.hadoop:hadoop-aws:2.7.0').getOrCreate() spark.conf.set('mapreduce.fileoutputcommitter.algorithm.version', '2') return spark
Create Spark session
udacity-data-engineering-nanodegree-project4-DL/etl.py
create_spark_session
eaggy/udacity-data-engineering-nanodegree
1
python
def create_spark_session(): '\n \n ' spark = SparkSession.builder.config('spark.jars.packages', 'org.apache.hadoop:hadoop-aws:2.7.0').getOrCreate() spark.conf.set('mapreduce.fileoutputcommitter.algorithm.version', '2') return spark
def create_spark_session(): '\n \n ' spark = SparkSession.builder.config('spark.jars.packages', 'org.apache.hadoop:hadoop-aws:2.7.0').getOrCreate() spark.conf.set('mapreduce.fileoutputcommitter.algorithm.version', '2') return spark<|docstring|>Create Spark session<|endoftext|>
c603d46a4527db2c9e7521b7c01737139b579e2abb0e24a3ba135642f77bd305
def process_song_data(spark, input_data, output_data): '\n Read song data files in JSON-format from Amazon S3,\n load the processed data into two analytical tables,\n and write these tables as parquet files back to Amazon S3.\n ' song_data = os.path.join(input_data, 'song_data', *(3 * ['*']), '*.json') song_data_schema = R([Fld('artist_id', Str(), False), Fld('artist_latitude', Str(), True), Fld('artist_longitude', Str(), True), Fld('artist_location', Str(), True), Fld('artist_name', Str(), False), Fld('song_id', Str(), False), Fld('title', Str(), False), Fld('duration', Dbl(), False), Fld('year', Int(), False)]) df = spark.read.json(path=song_data, schema=song_data_schema) songs_table = df.select('song_id', 'title', 'artist_id', 'year', 'duration').distinct() songs_table.write.parquet((output_data + 'songs_table.parquet'), mode='overwrite', partitionBy=['year', 'artist_id']) artists_table = df.select('artist_id', col('artist_name').alias('name'), col('artist_location').alias('location'), col('artist_latitude').alias('latitude'), col('artist_longitude').alias('longitude')).distinct() artists_table.write.parquet((output_data + 'artists_table.parquet'), mode='overwrite')
Read song data files in JSON-format from Amazon S3, load the processed data into two analytical tables, and write these tables as parquet files back to Amazon S3.
udacity-data-engineering-nanodegree-project4-DL/etl.py
process_song_data
eaggy/udacity-data-engineering-nanodegree
1
python
def process_song_data(spark, input_data, output_data): '\n Read song data files in JSON-format from Amazon S3,\n load the processed data into two analytical tables,\n and write these tables as parquet files back to Amazon S3.\n ' song_data = os.path.join(input_data, 'song_data', *(3 * ['*']), '*.json') song_data_schema = R([Fld('artist_id', Str(), False), Fld('artist_latitude', Str(), True), Fld('artist_longitude', Str(), True), Fld('artist_location', Str(), True), Fld('artist_name', Str(), False), Fld('song_id', Str(), False), Fld('title', Str(), False), Fld('duration', Dbl(), False), Fld('year', Int(), False)]) df = spark.read.json(path=song_data, schema=song_data_schema) songs_table = df.select('song_id', 'title', 'artist_id', 'year', 'duration').distinct() songs_table.write.parquet((output_data + 'songs_table.parquet'), mode='overwrite', partitionBy=['year', 'artist_id']) artists_table = df.select('artist_id', col('artist_name').alias('name'), col('artist_location').alias('location'), col('artist_latitude').alias('latitude'), col('artist_longitude').alias('longitude')).distinct() artists_table.write.parquet((output_data + 'artists_table.parquet'), mode='overwrite')
def process_song_data(spark, input_data, output_data): '\n Read song data files in JSON-format from Amazon S3,\n load the processed data into two analytical tables,\n and write these tables as parquet files back to Amazon S3.\n ' song_data = os.path.join(input_data, 'song_data', *(3 * ['*']), '*.json') song_data_schema = R([Fld('artist_id', Str(), False), Fld('artist_latitude', Str(), True), Fld('artist_longitude', Str(), True), Fld('artist_location', Str(), True), Fld('artist_name', Str(), False), Fld('song_id', Str(), False), Fld('title', Str(), False), Fld('duration', Dbl(), False), Fld('year', Int(), False)]) df = spark.read.json(path=song_data, schema=song_data_schema) songs_table = df.select('song_id', 'title', 'artist_id', 'year', 'duration').distinct() songs_table.write.parquet((output_data + 'songs_table.parquet'), mode='overwrite', partitionBy=['year', 'artist_id']) artists_table = df.select('artist_id', col('artist_name').alias('name'), col('artist_location').alias('location'), col('artist_latitude').alias('latitude'), col('artist_longitude').alias('longitude')).distinct() artists_table.write.parquet((output_data + 'artists_table.parquet'), mode='overwrite')<|docstring|>Read song data files in JSON-format from Amazon S3, load the processed data into two analytical tables, and write these tables as parquet files back to Amazon S3.<|endoftext|>
70ed44616f739397cc62f274c47794602c78fd4dee832db67d62839200124b6c
def process_log_data(spark, input_data, output_data): '\n Read log data files in JSON-format from Amazon S3,\n load the processed data into three analytical tables,\n and write these tables as parquet files back to Amazon S3.\n ' log_data = os.path.join(input_data, 'log_data', *(2 * ['*']), '*.json') log_data_schema = R([Fld('artist', Str(), True), Fld('auth', Str(), False), Fld('firstName', Str(), True), Fld('gender', Str(), True), Fld('itemInSession', Int(), False), Fld('lastName', Str(), True), Fld('length', Dbl(), True), Fld('level', Str(), False), Fld('location', Str(), True), Fld('method', Str(), False), Fld('page', Str(), False), Fld('registration', Dbl(), True), Fld('sessionId', Int(), False), Fld('song', Str(), True), Fld('status', Int(), False), Fld('ts', Lng(), False), Fld('userAgent', Str(), True), Fld('userId', Str(), True)]) df = spark.read.json(path=log_data, schema=log_data_schema) df = df.filter((col('page') == 'NextSong')) users_table = df.withColumn('max_ts_usr', max('ts').over(Window.partitionBy('userID'))).filter((((col('ts') == col('max_ts_usr')) & col('userID').isNotNull()) & (col('userID') != ''))).select(col('userID').alias('user_id'), col('firstName').alias('first_name'), col('lastName').alias('last_name'), 'gender', 'level').distinct() users_table.write.parquet((output_data + 'users_table.parquet'), mode='overwrite', partitionBy=['gender', 'level']) get_datetime = udf((lambda x: datetime.fromtimestamp((x / 1000)).replace(microsecond=0)), Time()) df = df.withColumn('start_time', get_datetime('ts')) time_table = df.withColumn('hour', hour('start_time')).withColumn('day', dayofmonth('start_time')).withColumn('week', weekofyear('start_time')).withColumn('month', month('start_time')).withColumn('year', year('start_time')).withColumn('weekday', dayofweek('start_time')).select('start_time', 'hour', 'day', 'week', 'month', 'year', 'weekday').distinct() time_table.write.parquet((output_data + 'time_table.parquet'), mode='overwrite', partitionBy=['year', 'month']) songs_table = spark.read.parquet((output_data + 'songs_table.parquet')) artists_table = spark.read.parquet((output_data + 'artists_table.parquet')) songs = songs_table.join(artists_table, 'artist_id', how='left').select('song_id', 'title', 'artist_id', 'name', 'duration') songplays_table = df.join(songs, [(df.artist == songs.name), (df.song == songs.title), (df.length == songs.duration)], how='inner') songplays_table = songplays_table.join(time_table, 'start_time', 'left').select('start_time', col('userId').alias('user_id'), 'level', 'song_id', 'artist_id', col('sessionId').alias('session_id'), 'location', col('userAgent').alias('user_agent'), 'year', 'month').withColumn('songplay_id', monotonically_increasing_id()) songplays_table.write.parquet((output_data + 'songplays_table.parquet'), mode='overwrite', partitionBy=['year', 'month'])
Read log data files in JSON-format from Amazon S3, load the processed data into three analytical tables, and write these tables as parquet files back to Amazon S3.
udacity-data-engineering-nanodegree-project4-DL/etl.py
process_log_data
eaggy/udacity-data-engineering-nanodegree
1
python
def process_log_data(spark, input_data, output_data): '\n Read log data files in JSON-format from Amazon S3,\n load the processed data into three analytical tables,\n and write these tables as parquet files back to Amazon S3.\n ' log_data = os.path.join(input_data, 'log_data', *(2 * ['*']), '*.json') log_data_schema = R([Fld('artist', Str(), True), Fld('auth', Str(), False), Fld('firstName', Str(), True), Fld('gender', Str(), True), Fld('itemInSession', Int(), False), Fld('lastName', Str(), True), Fld('length', Dbl(), True), Fld('level', Str(), False), Fld('location', Str(), True), Fld('method', Str(), False), Fld('page', Str(), False), Fld('registration', Dbl(), True), Fld('sessionId', Int(), False), Fld('song', Str(), True), Fld('status', Int(), False), Fld('ts', Lng(), False), Fld('userAgent', Str(), True), Fld('userId', Str(), True)]) df = spark.read.json(path=log_data, schema=log_data_schema) df = df.filter((col('page') == 'NextSong')) users_table = df.withColumn('max_ts_usr', max('ts').over(Window.partitionBy('userID'))).filter((((col('ts') == col('max_ts_usr')) & col('userID').isNotNull()) & (col('userID') != ))).select(col('userID').alias('user_id'), col('firstName').alias('first_name'), col('lastName').alias('last_name'), 'gender', 'level').distinct() users_table.write.parquet((output_data + 'users_table.parquet'), mode='overwrite', partitionBy=['gender', 'level']) get_datetime = udf((lambda x: datetime.fromtimestamp((x / 1000)).replace(microsecond=0)), Time()) df = df.withColumn('start_time', get_datetime('ts')) time_table = df.withColumn('hour', hour('start_time')).withColumn('day', dayofmonth('start_time')).withColumn('week', weekofyear('start_time')).withColumn('month', month('start_time')).withColumn('year', year('start_time')).withColumn('weekday', dayofweek('start_time')).select('start_time', 'hour', 'day', 'week', 'month', 'year', 'weekday').distinct() time_table.write.parquet((output_data + 'time_table.parquet'), mode='overwrite', partitionBy=['year', 'month']) songs_table = spark.read.parquet((output_data + 'songs_table.parquet')) artists_table = spark.read.parquet((output_data + 'artists_table.parquet')) songs = songs_table.join(artists_table, 'artist_id', how='left').select('song_id', 'title', 'artist_id', 'name', 'duration') songplays_table = df.join(songs, [(df.artist == songs.name), (df.song == songs.title), (df.length == songs.duration)], how='inner') songplays_table = songplays_table.join(time_table, 'start_time', 'left').select('start_time', col('userId').alias('user_id'), 'level', 'song_id', 'artist_id', col('sessionId').alias('session_id'), 'location', col('userAgent').alias('user_agent'), 'year', 'month').withColumn('songplay_id', monotonically_increasing_id()) songplays_table.write.parquet((output_data + 'songplays_table.parquet'), mode='overwrite', partitionBy=['year', 'month'])
def process_log_data(spark, input_data, output_data): '\n Read log data files in JSON-format from Amazon S3,\n load the processed data into three analytical tables,\n and write these tables as parquet files back to Amazon S3.\n ' log_data = os.path.join(input_data, 'log_data', *(2 * ['*']), '*.json') log_data_schema = R([Fld('artist', Str(), True), Fld('auth', Str(), False), Fld('firstName', Str(), True), Fld('gender', Str(), True), Fld('itemInSession', Int(), False), Fld('lastName', Str(), True), Fld('length', Dbl(), True), Fld('level', Str(), False), Fld('location', Str(), True), Fld('method', Str(), False), Fld('page', Str(), False), Fld('registration', Dbl(), True), Fld('sessionId', Int(), False), Fld('song', Str(), True), Fld('status', Int(), False), Fld('ts', Lng(), False), Fld('userAgent', Str(), True), Fld('userId', Str(), True)]) df = spark.read.json(path=log_data, schema=log_data_schema) df = df.filter((col('page') == 'NextSong')) users_table = df.withColumn('max_ts_usr', max('ts').over(Window.partitionBy('userID'))).filter((((col('ts') == col('max_ts_usr')) & col('userID').isNotNull()) & (col('userID') != ))).select(col('userID').alias('user_id'), col('firstName').alias('first_name'), col('lastName').alias('last_name'), 'gender', 'level').distinct() users_table.write.parquet((output_data + 'users_table.parquet'), mode='overwrite', partitionBy=['gender', 'level']) get_datetime = udf((lambda x: datetime.fromtimestamp((x / 1000)).replace(microsecond=0)), Time()) df = df.withColumn('start_time', get_datetime('ts')) time_table = df.withColumn('hour', hour('start_time')).withColumn('day', dayofmonth('start_time')).withColumn('week', weekofyear('start_time')).withColumn('month', month('start_time')).withColumn('year', year('start_time')).withColumn('weekday', dayofweek('start_time')).select('start_time', 'hour', 'day', 'week', 'month', 'year', 'weekday').distinct() time_table.write.parquet((output_data + 'time_table.parquet'), mode='overwrite', partitionBy=['year', 'month']) songs_table = spark.read.parquet((output_data + 'songs_table.parquet')) artists_table = spark.read.parquet((output_data + 'artists_table.parquet')) songs = songs_table.join(artists_table, 'artist_id', how='left').select('song_id', 'title', 'artist_id', 'name', 'duration') songplays_table = df.join(songs, [(df.artist == songs.name), (df.song == songs.title), (df.length == songs.duration)], how='inner') songplays_table = songplays_table.join(time_table, 'start_time', 'left').select('start_time', col('userId').alias('user_id'), 'level', 'song_id', 'artist_id', col('sessionId').alias('session_id'), 'location', col('userAgent').alias('user_agent'), 'year', 'month').withColumn('songplay_id', monotonically_increasing_id()) songplays_table.write.parquet((output_data + 'songplays_table.parquet'), mode='overwrite', partitionBy=['year', 'month'])<|docstring|>Read log data files in JSON-format from Amazon S3, load the processed data into three analytical tables, and write these tables as parquet files back to Amazon S3.<|endoftext|>
da8bd4bb0f29f767fb76f82ef2ca76f82d64c09db04e04a561e7eb8fd1e24f27
def main(): '\n 1. Create an object of `ConfigParser` and read AWS-credentials and output bucket name into it.\n 2. Create an Amazon S3 bucket for output.\n 3. Create a Spark session.\n 4. Process song data files.\n 5. Process log data files.\n ' config = configparser.ConfigParser() config.read('dl.cfg') os.environ['AWS_ACCESS_KEY_ID'] = config['AWS']['AWS_ACCESS_KEY_ID'] os.environ['AWS_SECRET_ACCESS_KEY'] = config['AWS']['AWS_SECRET_ACCESS_KEY'] output_bucket = config['BUCKET']['BUCKET_OUTPUT_NAME'] input_data = 's3a://udacity-dend/' output_data = 's3a://{}/'.format(output_bucket) try: s3 = boto3.client('s3') s3.create_bucket(Bucket=output_bucket) spark = create_spark_session() if spark: process_song_data(spark, input_data, output_data) process_log_data(spark, input_data, output_data) except ParamValidationError as e: print(e) except ClientError as e: print(e)
1. Create an object of `ConfigParser` and read AWS-credentials and output bucket name into it. 2. Create an Amazon S3 bucket for output. 3. Create a Spark session. 4. Process song data files. 5. Process log data files.
udacity-data-engineering-nanodegree-project4-DL/etl.py
main
eaggy/udacity-data-engineering-nanodegree
1
python
def main(): '\n 1. Create an object of `ConfigParser` and read AWS-credentials and output bucket name into it.\n 2. Create an Amazon S3 bucket for output.\n 3. Create a Spark session.\n 4. Process song data files.\n 5. Process log data files.\n ' config = configparser.ConfigParser() config.read('dl.cfg') os.environ['AWS_ACCESS_KEY_ID'] = config['AWS']['AWS_ACCESS_KEY_ID'] os.environ['AWS_SECRET_ACCESS_KEY'] = config['AWS']['AWS_SECRET_ACCESS_KEY'] output_bucket = config['BUCKET']['BUCKET_OUTPUT_NAME'] input_data = 's3a://udacity-dend/' output_data = 's3a://{}/'.format(output_bucket) try: s3 = boto3.client('s3') s3.create_bucket(Bucket=output_bucket) spark = create_spark_session() if spark: process_song_data(spark, input_data, output_data) process_log_data(spark, input_data, output_data) except ParamValidationError as e: print(e) except ClientError as e: print(e)
def main(): '\n 1. Create an object of `ConfigParser` and read AWS-credentials and output bucket name into it.\n 2. Create an Amazon S3 bucket for output.\n 3. Create a Spark session.\n 4. Process song data files.\n 5. Process log data files.\n ' config = configparser.ConfigParser() config.read('dl.cfg') os.environ['AWS_ACCESS_KEY_ID'] = config['AWS']['AWS_ACCESS_KEY_ID'] os.environ['AWS_SECRET_ACCESS_KEY'] = config['AWS']['AWS_SECRET_ACCESS_KEY'] output_bucket = config['BUCKET']['BUCKET_OUTPUT_NAME'] input_data = 's3a://udacity-dend/' output_data = 's3a://{}/'.format(output_bucket) try: s3 = boto3.client('s3') s3.create_bucket(Bucket=output_bucket) spark = create_spark_session() if spark: process_song_data(spark, input_data, output_data) process_log_data(spark, input_data, output_data) except ParamValidationError as e: print(e) except ClientError as e: print(e)<|docstring|>1. Create an object of `ConfigParser` and read AWS-credentials and output bucket name into it. 2. Create an Amazon S3 bucket for output. 3. Create a Spark session. 4. Process song data files. 5. Process log data files.<|endoftext|>
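A sketch of the dl.cfg file that the Spark ETL main() above expects; the section and key names are taken directly from the record, while the values are placeholders.

import configparser

config = configparser.ConfigParser()
config['AWS'] = {'AWS_ACCESS_KEY_ID': '<access key>', 'AWS_SECRET_ACCESS_KEY': '<secret key>'}
config['BUCKET'] = {'BUCKET_OUTPUT_NAME': 'my-output-bucket'}
with open('dl.cfg', 'w') as cfg_file:
    config.write(cfg_file)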
7a84bd0bfe568c43b17b26a7bcc789c63db4aa1ae49c907fb7b9fb655f9c4551
def adjust_csp(self, dictionary: dict, append: bool=True) -> None: 'If the default CSP settings need to be changed, this function can\n        be called by giving in a dictionary with key-value pairs which should\n        be changed. If `append=True`, the CSP sources given in the dictionary\n        are appended to the whitelisted sources in the default configuration.\n        If not, the input value source list instead replaces the default\n        whitelisted sources.\n        ' for (key, value) in dictionary.items(): if (append and (key in self._csp)): self._csp[key] += value else: self._csp[key] = value
If the default CSP settings need to be changed, this function can be called by giving in a dictionary with key-value pairs which should be changed. If `append=True`, the CSP sources given in the dictionary are appended to the whitelisted sources in the default configuration. If not, the input value source list instead replaces the default whitelisted sources.
webviz_config/_theme_class.py
adjust_csp
magnesj/webviz-config
44
python
def adjust_csp(self, dictionary: dict, append: bool=True) -> None: 'If the default CSP settings need to be changed, this function can\n        be called by giving in a dictionary with key-value pairs which should\n        be changed. If `append=True`, the CSP sources given in the dictionary\n        are appended to the whitelisted sources in the default configuration.\n        If not, the input value source list instead replaces the default\n        whitelisted sources.\n        ' for (key, value) in dictionary.items(): if (append and (key in self._csp)): self._csp[key] += value else: self._csp[key] = value
def adjust_csp(self, dictionary: dict, append: bool=True) -> None: 'If the default CSP settings need to be changed, this function can\n        be called by giving in a dictionary with key-value pairs which should\n        be changed. If `append=True`, the CSP sources given in the dictionary\n        are appended to the whitelisted sources in the default configuration.\n        If not, the input value source list instead replaces the default\n        whitelisted sources.\n        ' for (key, value) in dictionary.items(): if (append and (key in self._csp)): self._csp[key] += value else: self._csp[key] = value<|docstring|>If the default CSP settings need to be changed, this function can be called by giving in a dictionary with key-value pairs which should be changed. If `append=True`, the CSP sources given in the dictionary are appended to the whitelisted sources in the default configuration. If not, the input value source list instead replaces the default whitelisted sources.<|endoftext|>
40c3724ac2c4177969fd1ad589b37eb5ef43ffd24f4af179c23ec615cba068ac
def create_themed_layout(self, layout: dict) -> dict: '\n Create a new Plotly layout dict by merging the input layout with the theme layout,\n prioritizing the input layout if there are conflicts with the theme layout. In addition:\n For the special case of multiple xaxes or yaxes, e.g. xaxis2 and xaxis3 (for a secondary\n and tertiary xaxis), the axis will get the theme xaxis/yaxis layout, unless they are\n defined themselves as e.g. xaxis2 in the theme layout. Note that e.g. xaxis2 still needs to\n be in the input layout, just not in the theme layout.\n ' def deep_update(update: dict, ref: dict) -> dict: for (key, value) in ref.items(): if (key in update): if isinstance(value, dict): if isinstance(update[key], dict): update[key] = deep_update(update[key], value) if (key in ['xaxis', 'yaxis']): for kkey in update: if ((kkey not in ref) and kkey.startswith(key)): update[kkey] = deep_update(update[kkey], value) else: update[key] = value return update return deep_update(copy.deepcopy(layout), copy.deepcopy(self._plotly_theme['layout']))
Create a new Plotly layout dict by merging the input layout with the theme layout, prioritizing the input layout if there are conflicts with the theme layout. In addition: For the special case of multiple xaxes or yaxes, e.g. xaxis2 and xaxis3 (for a secondary and tertiary xaxis), the axis will get the theme xaxis/yaxis layout, unless they are defined themselves as e.g. xaxis2 in the theme layout. Note that e.g. xaxis2 still needs to be in the input layout, just not in the theme layout.
webviz_config/_theme_class.py
create_themed_layout
magnesj/webviz-config
44
python
def create_themed_layout(self, layout: dict) -> dict: '\n Create a new Plotly layout dict by merging the input layout with the theme layout,\n prioritizing the input layout if there are conflicts with the theme layout. In addition:\n For the special case of multiple xaxes or yaxes, e.g. xaxis2 and xaxis3 (for a secondary\n and tertiary xaxis), the axis will get the theme xaxis/yaxis layout, unless they are\n defined themselves as e.g. xaxis2 in the theme layout. Note that e.g. xaxis2 still needs to\n be in the input layout, just not in the theme layout.\n ' def deep_update(update: dict, ref: dict) -> dict: for (key, value) in ref.items(): if (key in update): if isinstance(value, dict): if isinstance(update[key], dict): update[key] = deep_update(update[key], value) if (key in ['xaxis', 'yaxis']): for kkey in update: if ((kkey not in ref) and kkey.startswith(key)): update[kkey] = deep_update(update[kkey], value) else: update[key] = value return update return deep_update(copy.deepcopy(layout), copy.deepcopy(self._plotly_theme['layout']))
def create_themed_layout(self, layout: dict) -> dict: '\n Create a new Plotly layout dict by merging the input layout with the theme layout,\n prioritizing the input layout if there are conflicts with the theme layout. In addition:\n For the special case of multiple xaxes or yaxes, e.g. xaxis2 and xaxis3 (for a secondary\n and tertiary xaxis), the axis will get the theme xaxis/yaxis layout, unless they are\n defined themselves as e.g. xaxis2 in the theme layout. Note that e.g. xaxis2 still needs to\n be in the input layout, just not in the theme layout.\n ' def deep_update(update: dict, ref: dict) -> dict: for (key, value) in ref.items(): if (key in update): if isinstance(value, dict): if isinstance(update[key], dict): update[key] = deep_update(update[key], value) if (key in ['xaxis', 'yaxis']): for kkey in update: if ((kkey not in ref) and kkey.startswith(key)): update[kkey] = deep_update(update[kkey], value) else: update[key] = value return update return deep_update(copy.deepcopy(layout), copy.deepcopy(self._plotly_theme['layout']))<|docstring|>Create a new Plotly layout dict by merging the input layout with the theme layout, prioritizing the input layout if there are conflicts with the theme layout. In addition: For the special case of multiple xaxes or yaxes, e.g. xaxis2 and xaxis3 (for a secondary and tertiary xaxis), the axis will get the theme xaxis/yaxis layout, unless they are defined themselves as e.g. xaxis2 in the theme layout. Note that e.g. xaxis2 still needs to be in the input layout, just not in the theme layout.<|endoftext|>
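A usage sketch for create_themed_layout above, assuming theme is an instance of the theme class these methods belong to and that its plotly layout defines xaxis defaults but no xaxis2 entry (both assumptions).

themed = theme.create_themed_layout({
    'title': 'My plot',               # input values win wherever they conflict with the theme
    'xaxis': {'title': 'x'},          # merged with the theme's xaxis defaults
    'xaxis2': {'overlaying': 'x'},    # also inherits the theme's xaxis defaults
})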
bff973a6e15029fd8d6f1d77e7082b05e0fc24e5ca8195a5da61de95e9ab2979
@property def csp(self) -> dict: 'Returns the content security policy settings for the theme.' return self._csp
Returns the content security policy settings for the theme.
webviz_config/_theme_class.py
csp
magnesj/webviz-config
44
python
@property def csp(self) -> dict: return self._csp
@property def csp(self) -> dict: return self._csp<|docstring|>Returns the content security policy settings for the theme.<|endoftext|>
6777ab4c67b85f895f1d674f89e71e73de93730f75c6a52ca7617e234480e208
@property def feature_policy(self) -> dict: 'Returns the feature policy settings for the theme.' return self._feature_policy
Returns the feature policy settings for the theme.
webviz_config/_theme_class.py
feature_policy
magnesj/webviz-config
44
python
@property def feature_policy(self) -> dict: return self._feature_policy
@property def feature_policy(self) -> dict: return self._feature_policy<|docstring|>Returns the feature policy settings for the theme.<|endoftext|>
7ee3aa0bc63b553ae21dd1ab74197214061210a8675738db9567dca68715d833
@plotly_theme.setter def plotly_theme(self, plotly_theme: dict) -> None: 'Layout object of Plotly graph objects.' self._plotly_theme = plotly_theme
Layout object of Plotly graph objects.
webviz_config/_theme_class.py
plotly_theme
magnesj/webviz-config
44
python
@plotly_theme.setter def plotly_theme(self, plotly_theme: dict) -> None: self._plotly_theme = plotly_theme
@plotly_theme.setter def plotly_theme(self, plotly_theme: dict) -> None: self._plotly_theme = plotly_theme<|docstring|>Layout object of Plotly graph objects.<|endoftext|>