Remove a component from the pipeline. name (unicode): Name of the component to remove. RETURNS (tuple): A `(name, component)` tuple of the removed component. DOCS: https://spacy.io/api/language#remove_pipe
def remove_pipe(self, name):
    """Remove a component from the pipeline.

    name (unicode): Name of the component to remove.
    RETURNS (tuple): A `(name, component)` tuple of the removed component.

    DOCS: https://spacy.io/api/language#remove_pipe
    """
    if name not in self.pipe_names:
        raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
    return self.pipeline.pop(self.pipe_names.index(name))
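A minimal usage sketch, assuming a spaCy v2 pipeline (the model name is illustrative):

    import spacy

    nlp = spacy.load("en_core_web_sm")  # illustrative model name
    name, component = nlp.remove_pipe("ner")
    assert name == "ner"
    nlp.add_pipe(component, name="ner")  # the removed component can be re-added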
Update the models in the pipeline. docs (iterable): A batch of `Doc` objects. golds (iterable): A batch of `GoldParse` objects. drop (float): The dropout rate. sgd (callable): An optimizer. RETURNS (dict): Results from the update. DOCS: https://spacy.io/api/language#update
def update(self, docs, golds, drop=0.0, sgd=None, losses=None, component_cfg=None):
    """Update the models in the pipeline.

    docs (iterable): A batch of `Doc` objects.
    golds (iterable): A batch of `GoldParse` objects.
    drop (float): The dropout rate.
    sgd (callable): An optimizer.
    RETURNS (dict): Results from the update.

    DOCS: https://spacy.io/api/language#update
    """
    if len(docs) != len(golds):
        raise IndexError(Errors.E009.format(n_docs=len(docs), n_golds=len(golds)))
    if len(docs) == 0:
        return
    if sgd is None:
        if self._optimizer is None:
            self._optimizer = create_default_optimizer(Model.ops)
        sgd = self._optimizer
    # Allow dict of args to GoldParse, instead of GoldParse objects.
    gold_objs = []
    doc_objs = []
    for doc, gold in zip(docs, golds):
        if isinstance(doc, basestring_):
            doc = self.make_doc(doc)
        if not isinstance(gold, GoldParse):
            gold = GoldParse(doc, **gold)
        doc_objs.append(doc)
        gold_objs.append(gold)
    golds = gold_objs
    docs = doc_objs
    grads = {}

    def get_grads(W, dW, key=None):
        grads[key] = (W, dW)

    get_grads.alpha = sgd.alpha
    get_grads.b1 = sgd.b1
    get_grads.b2 = sgd.b2
    pipes = list(self.pipeline)
    random.shuffle(pipes)
    if component_cfg is None:
        component_cfg = {}
    for name, proc in pipes:
        if not hasattr(proc, "update"):
            continue
        grads = {}
        kwargs = component_cfg.get(name, {})
        kwargs.setdefault("drop", drop)
        proc.update(docs, golds, sgd=get_grads, losses=losses, **kwargs)
        for key, (W, dW) in grads.items():
            sgd(W, dW, key=key)
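A minimal training-loop sketch around `update`, assuming spaCy v2; the label and training example are illustrative. As the code above shows, `update` also accepts raw texts and annotation dicts, converting them to `Doc` and `GoldParse` internally:

    import random
    import spacy

    nlp = spacy.blank("en")
    ner = nlp.create_pipe("ner")
    nlp.add_pipe(ner)
    ner.add_label("GADGET")  # illustrative label
    TRAIN_DATA = [("I like my phone", {"entities": [(10, 15, "GADGET")]})]

    optimizer = nlp.begin_training()
    for epoch in range(10):
        random.shuffle(TRAIN_DATA)
        losses = {}
        texts, annotations = zip(*TRAIN_DATA)
        nlp.update(texts, annotations, drop=0.2, sgd=optimizer, losses=losses)
        print(losses)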
Make a "rehearsal" update to the models in the pipeline, to prevent forgetting. Rehearsal updates run an initial copy of the model over some data, and update the model so its current predictions are more like the initial ones. This is useful for keeping a pre-trained model on-track, even if you're updating it with a smaller set of examples. docs (iterable): A batch of `Doc` objects. drop (float): The droput rate. sgd (callable): An optimizer. RETURNS (dict): Results from the update. EXAMPLE: >>> raw_text_batches = minibatch(raw_texts) >>> for labelled_batch in minibatch(zip(train_docs, train_golds)): >>> docs, golds = zip(*train_docs) >>> nlp.update(docs, golds) >>> raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)] >>> nlp.rehearse(raw_batch)
def rehearse(self, docs, sgd=None, losses=None, config=None):
    """Make a "rehearsal" update to the models in the pipeline, to prevent
    forgetting. Rehearsal updates run an initial copy of the model over some
    data, and update the model so its current predictions are more like the
    initial ones. This is useful for keeping a pre-trained model on-track,
    even if you're updating it with a smaller set of examples.

    docs (iterable): A batch of `Doc` objects.
    drop (float): The dropout rate.
    sgd (callable): An optimizer.
    RETURNS (dict): Results from the update.

    EXAMPLE:
        >>> raw_text_batches = minibatch(raw_texts)
        >>> for labelled_batch in minibatch(zip(train_docs, train_golds)):
        >>>     docs, golds = zip(*labelled_batch)
        >>>     nlp.update(docs, golds)
        >>>     raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)]
        >>>     nlp.rehearse(raw_batch)
    """
    # TODO: document
    if len(docs) == 0:
        return
    if sgd is None:
        if self._optimizer is None:
            self._optimizer = create_default_optimizer(Model.ops)
        sgd = self._optimizer
    docs = list(docs)
    for i, doc in enumerate(docs):
        if isinstance(doc, basestring_):
            docs[i] = self.make_doc(doc)
    pipes = list(self.pipeline)
    random.shuffle(pipes)
    if config is None:
        config = {}
    grads = {}

    def get_grads(W, dW, key=None):
        grads[key] = (W, dW)

    get_grads.alpha = sgd.alpha
    get_grads.b1 = sgd.b1
    get_grads.b2 = sgd.b2
    for name, proc in pipes:
        if not hasattr(proc, "rehearse"):
            continue
        grads = {}
        proc.rehearse(docs, sgd=get_grads, losses=losses, **config.get(name, {}))
        for key, (W, dW) in grads.items():
            sgd(W, dW, key=key)
    return losses
Can be called before training to pre-process gold data. By default, it handles nonprojectivity and adds missing tags to the tag map. docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects. YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
def preprocess_gold(self, docs_golds):
    """Can be called before training to pre-process gold data. By default,
    it handles nonprojectivity and adds missing tags to the tag map.

    docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
    YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
    """
    for name, proc in self.pipeline:
        if hasattr(proc, "preprocess_gold"):
            docs_golds = proc.preprocess_gold(docs_golds)
    for doc, gold in docs_golds:
        yield doc, gold
Allocate models, pre-process training data and acquire a trainer and optimizer. Used as a contextmanager. get_gold_tuples (function): Function returning gold data component_cfg (dict): Config parameters for specific components. **cfg: Config parameters. RETURNS: An optimizer. DOCS: https://spacy.io/api/language#begin_training
def begin_training(self, get_gold_tuples=None, sgd=None, component_cfg=None, **cfg):
    """Allocate models, pre-process training data and acquire a trainer and
    optimizer. Used as a contextmanager.

    get_gold_tuples (function): Function returning gold data.
    component_cfg (dict): Config parameters for specific components.
    **cfg: Config parameters.
    RETURNS: An optimizer.

    DOCS: https://spacy.io/api/language#begin_training
    """
    if get_gold_tuples is None:
        get_gold_tuples = lambda: []
    else:
        # Populate vocab
        for _, annots_brackets in get_gold_tuples():
            for annots, _ in annots_brackets:
                for word in annots[1]:
                    _ = self.vocab[word]  # noqa: F841
    if cfg.get("device", -1) >= 0:
        util.use_gpu(cfg["device"])
        if self.vocab.vectors.data.shape[1] >= 1:
            self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
    link_vectors_to_models(self.vocab)
    if self.vocab.vectors.data.shape[1]:
        cfg["pretrained_vectors"] = self.vocab.vectors.name
    if sgd is None:
        sgd = create_default_optimizer(Model.ops)
    self._optimizer = sgd
    if component_cfg is None:
        component_cfg = {}
    for name, proc in self.pipeline:
        if hasattr(proc, "begin_training"):
            kwargs = component_cfg.get(name, {})
            kwargs.update(cfg)
            proc.begin_training(
                get_gold_tuples,
                pipeline=self.pipeline,
                sgd=self._optimizer,
                **kwargs
            )
    return self._optimizer
Continue training a pre-trained model. Create and return an optimizer, and initialize "rehearsal" for any pipeline component that has a .rehearse() method. Rehearsal is used to prevent models from "forgetting" their initialised "knowledge". To perform rehearsal, collect samples of text you want the models to retain performance on, and call nlp.rehearse() with a batch of Doc objects.
def resume_training(self, sgd=None, **cfg):
    """Continue training a pre-trained model. Create and return an optimizer,
    and initialize "rehearsal" for any pipeline component that has a
    .rehearse() method. Rehearsal is used to prevent models from "forgetting"
    their initialised "knowledge". To perform rehearsal, collect samples of
    text you want the models to retain performance on, and call nlp.rehearse()
    with a batch of Doc objects.
    """
    if cfg.get("device", -1) >= 0:
        util.use_gpu(cfg["device"])
        if self.vocab.vectors.data.shape[1] >= 1:
            self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
    link_vectors_to_models(self.vocab)
    if self.vocab.vectors.data.shape[1]:
        cfg["pretrained_vectors"] = self.vocab.vectors.name
    if sgd is None:
        sgd = create_default_optimizer(Model.ops)
    self._optimizer = sgd
    for name, proc in self.pipeline:
        if hasattr(proc, "_rehearsal_model"):
            proc._rehearsal_model = deepcopy(proc.model)
    return self._optimizer
Replace weights of models in the pipeline with those provided in the params dictionary. Can be used as a contextmanager, in which case, models go back to their original weights after the block. params (dict): A dictionary of parameters keyed by model ID. **cfg: Config parameters. EXAMPLE: >>> with nlp.use_params(optimizer.averages): >>> nlp.to_disk('/tmp/checkpoint')
def use_params(self, params, **cfg):
    """Replace weights of models in the pipeline with those provided in the
    params dictionary. Can be used as a contextmanager, in which case, models
    go back to their original weights after the block.

    params (dict): A dictionary of parameters keyed by model ID.
    **cfg: Config parameters.

    EXAMPLE:
        >>> with nlp.use_params(optimizer.averages):
        >>>     nlp.to_disk('/tmp/checkpoint')
    """
    contexts = [
        pipe.use_params(params)
        for name, pipe in self.pipeline
        if hasattr(pipe, "use_params")
    ]
    # TODO: Having trouble with contextlib
    # Workaround: these aren't actually context managers atm.
    for context in contexts:
        try:
            next(context)
        except StopIteration:
            pass
    yield
    for context in contexts:
        try:
            next(context)
        except StopIteration:
            pass
Process texts as a stream, and yield `Doc` objects in order. texts (iterator): A sequence of texts to process. as_tuples (bool): If set to True, inputs should be a sequence of (text, context) tuples. Output will then be a sequence of (doc, context) tuples. Defaults to False. batch_size (int): The number of texts to buffer. disable (list): Names of the pipeline components to disable. cleanup (bool): If True, unneeded strings are freed to control memory use. Experimental. component_cfg (dict): An optional dictionary with extra keyword arguments for specific components. YIELDS (Doc): Documents in the order of the original text. DOCS: https://spacy.io/api/language#pipe
def pipe(
    self,
    texts,
    as_tuples=False,
    n_threads=-1,
    batch_size=1000,
    disable=[],
    cleanup=False,
    component_cfg=None,
):
    """Process texts as a stream, and yield `Doc` objects in order.

    texts (iterator): A sequence of texts to process.
    as_tuples (bool): If set to True, inputs should be a sequence of
        (text, context) tuples. Output will then be a sequence of
        (doc, context) tuples. Defaults to False.
    batch_size (int): The number of texts to buffer.
    disable (list): Names of the pipeline components to disable.
    cleanup (bool): If True, unneeded strings are freed to control memory
        use. Experimental.
    component_cfg (dict): An optional dictionary with extra keyword
        arguments for specific components.
    YIELDS (Doc): Documents in the order of the original text.

    DOCS: https://spacy.io/api/language#pipe
    """
    if n_threads != -1:
        deprecation_warning(Warnings.W016)
    if as_tuples:
        text_context1, text_context2 = itertools.tee(texts)
        texts = (tc[0] for tc in text_context1)
        contexts = (tc[1] for tc in text_context2)
        docs = self.pipe(
            texts,
            batch_size=batch_size,
            disable=disable,
            component_cfg=component_cfg,
        )
        for doc, context in izip(docs, contexts):
            yield (doc, context)
        return
    docs = (self.make_doc(text) for text in texts)
    if component_cfg is None:
        component_cfg = {}
    for name, proc in self.pipeline:
        if name in disable:
            continue
        kwargs = component_cfg.get(name, {})
        # Allow component_cfg to overwrite the top-level kwargs.
        kwargs.setdefault("batch_size", batch_size)
        if hasattr(proc, "pipe"):
            docs = proc.pipe(docs, **kwargs)
        else:
            # Apply the function, but yield the doc
            docs = _pipe(proc, docs, kwargs)
    # Track weakrefs of "recent" documents, so that we can see when they
    # expire from memory. When they do, we know we don't need old strings.
    # This way, we avoid maintaining an unbounded growth in string entries
    # in the string store.
    recent_refs = weakref.WeakSet()
    old_refs = weakref.WeakSet()
    # Keep track of the original string data, so that if we flush old strings,
    # we can recover the original ones. However, we only want to do this if we're
    # really adding strings, to save up-front costs.
    original_strings_data = None
    nr_seen = 0
    for doc in docs:
        yield doc
        if cleanup:
            recent_refs.add(doc)
            if nr_seen < 10000:
                old_refs.add(doc)
                nr_seen += 1
            elif len(old_refs) == 0:
                old_refs, recent_refs = recent_refs, old_refs
                if original_strings_data is None:
                    original_strings_data = list(self.vocab.strings)
                else:
                    keys, strings = self.vocab.strings._cleanup_stale_strings(
                        original_strings_data
                    )
                    self.vocab._reset_cache(keys, strings)
                    self.tokenizer._reset_cache(keys)
                nr_seen = 0
Save the current state to a directory. If a model is loaded, this will include the model. path (unicode or Path): Path to a directory, which will be created if it doesn't exist. exclude (list): Names of components or serialization fields to exclude. DOCS: https://spacy.io/api/language#to_disk
def to_disk(self, path, exclude=tuple(), disable=None):
    """Save the current state to a directory. If a model is loaded, this
    will include the model.

    path (unicode or Path): Path to a directory, which will be created if
        it doesn't exist.
    exclude (list): Names of components or serialization fields to exclude.

    DOCS: https://spacy.io/api/language#to_disk
    """
    if disable is not None:
        deprecation_warning(Warnings.W014)
        exclude = disable
    path = util.ensure_path(path)
    serializers = OrderedDict()
    serializers["tokenizer"] = lambda p: self.tokenizer.to_disk(p, exclude=["vocab"])
    serializers["meta.json"] = lambda p: p.open("w").write(srsly.json_dumps(self.meta))
    for name, proc in self.pipeline:
        if not hasattr(proc, "name"):
            continue
        if name in exclude:
            continue
        if not hasattr(proc, "to_disk"):
            continue
        serializers[name] = lambda p, proc=proc: proc.to_disk(p, exclude=["vocab"])
    serializers["vocab"] = lambda p: self.vocab.to_disk(p)
    util.to_disk(path, serializers, exclude)
Loads state from a directory. Modifies the object in place and returns it. If the saved `Language` object contains a model, the model will be loaded. path (unicode or Path): A path to a directory. exclude (list): Names of components or serialization fields to exclude. RETURNS (Language): The modified `Language` object. DOCS: https://spacy.io/api/language#from_disk
def from_disk(self, path, exclude=tuple(), disable=None):
    """Loads state from a directory. Modifies the object in place and
    returns it. If the saved `Language` object contains a model, the
    model will be loaded.

    path (unicode or Path): A path to a directory.
    exclude (list): Names of components or serialization fields to exclude.
    RETURNS (Language): The modified `Language` object.

    DOCS: https://spacy.io/api/language#from_disk
    """
    if disable is not None:
        deprecation_warning(Warnings.W014)
        exclude = disable
    path = util.ensure_path(path)
    deserializers = OrderedDict()
    deserializers["meta.json"] = lambda p: self.meta.update(srsly.read_json(p))
    deserializers["vocab"] = lambda p: self.vocab.from_disk(p) and _fix_pretrained_vectors_name(self)
    deserializers["tokenizer"] = lambda p: self.tokenizer.from_disk(p, exclude=["vocab"])
    for name, proc in self.pipeline:
        if name in exclude:
            continue
        if not hasattr(proc, "from_disk"):
            continue
        deserializers[name] = lambda p, proc=proc: proc.from_disk(p, exclude=["vocab"])
    if not (path / "vocab").exists() and "vocab" not in exclude:
        # Convert to list here in case exclude is (default) tuple
        exclude = list(exclude) + ["vocab"]
    util.from_disk(path, deserializers, exclude)
    self._path = path
    return self
Serialize the current state to a binary string. exclude (list): Names of components or serialization fields to exclude. RETURNS (bytes): The serialized form of the `Language` object. DOCS: https://spacy.io/api/language#to_bytes
def to_bytes(self, exclude=tuple(), disable=None, **kwargs):
    """Serialize the current state to a binary string.

    exclude (list): Names of components or serialization fields to exclude.
    RETURNS (bytes): The serialized form of the `Language` object.

    DOCS: https://spacy.io/api/language#to_bytes
    """
    if disable is not None:
        deprecation_warning(Warnings.W014)
        exclude = disable
    serializers = OrderedDict()
    serializers["vocab"] = lambda: self.vocab.to_bytes()
    serializers["tokenizer"] = lambda: self.tokenizer.to_bytes(exclude=["vocab"])
    serializers["meta.json"] = lambda: srsly.json_dumps(self.meta)
    for name, proc in self.pipeline:
        if name in exclude:
            continue
        if not hasattr(proc, "to_bytes"):
            continue
        serializers[name] = lambda proc=proc: proc.to_bytes(exclude=["vocab"])
    exclude = util.get_serialization_exclude(serializers, exclude, kwargs)
    return util.to_bytes(serializers, exclude)
Load state from a binary string. bytes_data (bytes): The data to load from. exclude (list): Names of components or serialization fields to exclude. RETURNS (Language): The `Language` object. DOCS: https://spacy.io/api/language#from_bytes
def from_bytes(self, bytes_data, exclude=tuple(), disable=None, **kwargs):
    """Load state from a binary string.

    bytes_data (bytes): The data to load from.
    exclude (list): Names of components or serialization fields to exclude.
    RETURNS (Language): The `Language` object.

    DOCS: https://spacy.io/api/language#from_bytes
    """
    if disable is not None:
        deprecation_warning(Warnings.W014)
        exclude = disable
    deserializers = OrderedDict()
    deserializers["meta.json"] = lambda b: self.meta.update(srsly.json_loads(b))
    deserializers["vocab"] = lambda b: self.vocab.from_bytes(b) and _fix_pretrained_vectors_name(self)
    deserializers["tokenizer"] = lambda b: self.tokenizer.from_bytes(b, exclude=["vocab"])
    for name, proc in self.pipeline:
        if name in exclude:
            continue
        if not hasattr(proc, "from_bytes"):
            continue
        deserializers[name] = lambda b, proc=proc: proc.from_bytes(b, exclude=["vocab"])
    exclude = util.get_serialization_exclude(deserializers, exclude, kwargs)
    util.from_bytes(bytes_data, deserializers, exclude)
    return self
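A round-trip sketch for the byte serialization, assuming a spaCy v2 pipeline:

    import spacy

    nlp = spacy.blank("en")
    data = nlp.to_bytes()

    nlp2 = spacy.blank("en")
    nlp2.from_bytes(data)  # nlp2 now mirrors nlp's vocab, tokenizer and components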
Restore the pipeline to its state when DisabledPipes was created.
def restore(self):
    """Restore the pipeline to its state when DisabledPipes was created."""
    current, self.nlp.pipeline = self.nlp.pipeline, self.original_pipeline
    unexpected = [name for name, pipe in current if not self.nlp.has_pipe(name)]
    if unexpected:
        # Don't change the pipeline if we're raising an error.
        self.nlp.pipeline = current
        raise ValueError(Errors.E008.format(names=unexpected))
    self[:] = []
Yields all available rules. :type rules_paths: [Path] :rtype: Iterable[Rule]
def get_loaded_rules(rules_paths):
    """Yields all available rules.

    :type rules_paths: [Path]
    :rtype: Iterable[Rule]

    """
    for path in rules_paths:
        if path.name != '__init__.py':
            rule = Rule.from_path(path)
            if rule.is_enabled:
                yield rule
Yields all rules import paths. :rtype: Iterable[Path]
def get_rules_import_paths():
    """Yields all rules import paths.

    :rtype: Iterable[Path]

    """
    # Bundled rules:
    yield Path(__file__).parent.joinpath('rules')
    # Rules defined by user:
    yield settings.user_dir.joinpath('rules')
    # Packages with third-party rules:
    for path in sys.path:
        for contrib_module in Path(path).glob('thefuck_contrib_*'):
            contrib_rules = contrib_module.joinpath('rules')
            if contrib_rules.is_dir():
                yield contrib_rules
Returns all enabled rules. :rtype: [Rule]
def get_rules():
    """Returns all enabled rules.

    :rtype: [Rule]

    """
    paths = [rule_path for path in get_rules_import_paths()
             for rule_path in sorted(path.glob('*.py'))]
    return sorted(get_loaded_rules(paths),
                  key=lambda rule: rule.priority)
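For context, a minimal sketch of a rule module that this loader could pick up; the attribute names follow what `Rule.from_path` (shown further below) reads, and the match condition is illustrative only:

    # e.g. saved as ~/.config/thefuck/rules/example_rule.py
    def match(command):
        return 'No such file or directory' in command.output

    def get_new_command(command):
        return 'mkdir -p foo && {}'.format(command.script)  # illustrative fix

    enabled_by_default = True
    requires_output = True
    priority = 1000  # optional; DEFAULT_PRIORITY is used when absent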
Yields sorted commands without duplicates. :type corrected_commands: Iterable[thefuck.types.CorrectedCommand] :rtype: Iterable[thefuck.types.CorrectedCommand]
def organize_commands(corrected_commands):
    """Yields sorted commands without duplicates.

    :type corrected_commands: Iterable[thefuck.types.CorrectedCommand]
    :rtype: Iterable[thefuck.types.CorrectedCommand]

    """
    try:
        first_command = next(corrected_commands)
        yield first_command
    except StopIteration:
        return

    without_duplicates = {
        command for command in sorted(
            corrected_commands, key=lambda command: command.priority)
        if command != first_command}

    sorted_commands = sorted(
        without_duplicates,
        key=lambda corrected_command: corrected_command.priority)

    # The original format string was missing its `{}` placeholder,
    # so the joined command list was silently discarded:
    logs.debug(u'Corrected commands: {}'.format(
        ', '.join(u'{}'.format(cmd) for cmd in [first_command] + sorted_commands)))

    for command in sorted_commands:
        yield command
Returns generator with sorted and unique corrected commands. :type command: thefuck.types.Command :rtype: Iterable[thefuck.types.CorrectedCommand]
def get_corrected_commands(command):
    """Returns generator with sorted and unique corrected commands.

    :type command: thefuck.types.Command
    :rtype: Iterable[thefuck.types.CorrectedCommand]

    """
    corrected_commands = (
        corrected for rule in get_rules()
        if rule.is_match(command)
        for corrected in rule.get_corrected_commands(command))
    return organize_commands(corrected_commands)
Fixes the previous command. Used when `thefuck` is called without arguments.
def fix_command(known_args):
    """Fixes the previous command. Used when `thefuck` is called without arguments."""
    settings.init(known_args)
    with logs.debug_time('Total'):
        logs.debug(u'Run with settings: {}'.format(pformat(settings)))
        raw_command = _get_raw_command(known_args)

        try:
            command = types.Command.from_raw_script(raw_command)
        except EmptyCommand:
            logs.debug('Empty command, nothing to do')
            return

        corrected_commands = get_corrected_commands(command)
        selected_command = select_command(corrected_commands)

        if selected_command:
            selected_command.run(command)
        else:
            sys.exit(1)
Gets command output from shell logger.
def get_output(script):
    """Gets command output from shell logger."""
    with logs.debug_time(u'Read output from external shell logger'):
        commands = _get_last_n(const.SHELL_LOGGER_LIMIT)
        for command in commands:
            if command['command'] == script:
                lines = _get_output_lines(command['output'])
                output = '\n'.join(lines).strip()
                return output
        else:
            logs.warn("Output isn't available in shell logger")
            return None
Returns list of history entries.
def _get_history_lines(self):
    """Returns list of history entries."""
    history_file_name = self._get_history_file_name()
    if os.path.isfile(history_file_name):
        with io.open(history_file_name, 'r',
                     encoding='utf-8', errors='ignore') as history_file:
            lines = history_file.readlines()
            if settings.history_limit:
                lines = lines[-settings.history_limit:]

            for line in lines:
                prepared = self._script_from_history(line).strip()
                if prepared:
                    yield prepared
Split the command using shell-like syntax.
def split_command(self, command):
    """Split the command using shell-like syntax."""
    encoded = self.encode_utf8(command)

    try:
        splitted = [s.replace("??", "\\ ") for s in
                    shlex.split(encoded.replace('\\ ', '??'))]
    except ValueError:
        splitted = encoded.split(' ')

    return self.decode_utf8(splitted)
Return a shell-escaped version of the string s.
def quote(self, s):
    """Return a shell-escaped version of the string s."""
    if six.PY2:
        from pipes import quote
    else:
        from shlex import quote

    return quote(s)
Returns the name and version of the current shell
def info(self):
    """Returns the name and version of the current shell"""
    proc = Popen(['fish', '--version'],
                 stdout=PIPE, stderr=DEVNULL)
    version = proc.stdout.read().decode('utf-8').split()[-1]
    return u'Fish Shell {}'.format(version)
Puts command script to shell history.
def _put_to_history(self, command_script):
    """Puts command script to shell history."""
    history_file_name = self._get_history_file_name()
    if os.path.isfile(history_file_name):
        with open(history_file_name, 'a') as history:
            entry = self._get_history_line(command_script)
            if six.PY2:
                history.write(entry.encode('utf-8'))
            else:
                history.write(entry)
Gets brew's default commands from the local environment.
def _get_brew_commands(brew_path_prefix):
    """Gets brew's default commands from the local environment."""
    brew_cmd_path = brew_path_prefix + BREW_CMD_PATH

    return [name[:-3] for name in os.listdir(brew_cmd_path)
            if name.endswith(('.rb', '.sh'))]
Gets tap-specific commands. https://github.com/Homebrew/homebrew/blob/master/Library/brew.rb#L115
def _get_brew_tap_specific_commands(brew_path_prefix):
    """Gets tap-specific commands.

    https://github.com/Homebrew/homebrew/blob/master/Library/brew.rb#L115
    """
    commands = []
    brew_taps_path = brew_path_prefix + TAP_PATH

    for user in _get_directory_names_only(brew_taps_path):
        taps = _get_directory_names_only(brew_taps_path + '/%s' % user)

        # Brew Taps's naming rule
        # https://github.com/Homebrew/homebrew/blob/master/share/doc/homebrew/brew-tap.md#naming-conventions-and-limitations
        taps = (tap for tap in taps if tap.startswith('homebrew-'))

        for tap in taps:
            tap_cmd_path = brew_taps_path + TAP_CMD_PATH % (user, tap)

            if os.path.isdir(tap_cmd_path):
                commands += (name.replace('brew-', '').replace('.rb', '')
                             for name in os.listdir(tap_cmd_path)
                             if _is_brew_tap_cmd_naming(name))

    return commands
Returns the name and version of the current shell
def info(self):
    """Returns the name and version of the current shell"""
    proc = Popen(['zsh', '-c', 'echo $ZSH_VERSION'],
                 stdout=PIPE, stderr=DEVNULL)
    version = proc.stdout.read().decode('utf-8').strip()
    return u'ZSH {}'.format(version)
Resolves git aliases and supports testing for both git and hub.
def git_support(fn, command):
    """Resolves git aliases and supports testing for both git and hub."""
    # supports GitHub's `hub` command
    # which is recommended to be used with `alias git=hub`
    # but at this point, shell aliases have already been resolved
    if not is_app(command, 'git', 'hub'):
        return False

    # perform git aliases expansion
    if 'trace: alias expansion:' in command.output:
        search = re.search("trace: alias expansion: ([^ ]*) => ([^\n]*)",
                           command.output)
        alias = search.group(1)

        # by default git quotes everything, for example:
        #     'commit' '--amend'
        # which is surprising and does not allow to easily test for
        # eg. 'git commit'
        expansion = ' '.join(shell.quote(part)
                             for part in shell.split_command(search.group(2)))
        new_script = command.script.replace(alias, expansion)

        command = command.update(script=new_script)

    return fn(command)
Yields actions for pressed keys.
def read_actions():
    """Yields actions for pressed keys."""
    while True:
        key = get_key()

        # Handle arrows, j/k (qwerty), and n/e (colemak)
        if key in (const.KEY_UP, const.KEY_CTRL_N, 'k', 'e'):
            yield const.ACTION_PREVIOUS
        elif key in (const.KEY_DOWN, const.KEY_CTRL_P, 'j', 'n'):
            yield const.ACTION_NEXT
        elif key in (const.KEY_CTRL_C, 'q'):
            yield const.ACTION_ABORT
        elif key in ('\n', '\r'):
            yield const.ACTION_SELECT
Returns: - the first command when confirmation disabled; - None when ctrl+c pressed; - selected command. :type corrected_commands: Iterable[thefuck.types.CorrectedCommand] :rtype: thefuck.types.CorrectedCommand | None
def select_command(corrected_commands):
    """Returns:

     - the first command when confirmation disabled;
     - None when ctrl+c pressed;
     - selected command.

    :type corrected_commands: Iterable[thefuck.types.CorrectedCommand]
    :rtype: thefuck.types.CorrectedCommand | None

    """
    try:
        selector = CommandSelector(corrected_commands)
    except NoRuleMatched:
        logs.failed('No fucks given' if get_alias() == 'fuck'
                    else 'Nothing found')
        return

    if not settings.require_confirmation:
        logs.show_corrected_command(selector.value)
        return selector.value

    logs.confirm_text(selector.value)

    for action in read_actions():
        if action == const.ACTION_SELECT:
            sys.stderr.write('\n')
            return selector.value
        elif action == const.ACTION_ABORT:
            logs.failed('\nAborted')
            return
        elif action == const.ACTION_PREVIOUS:
            selector.previous()
            logs.confirm_text(selector.value)
        elif action == const.ACTION_NEXT:
            selector.next()
            logs.confirm_text(selector.value)
Create a spawned process. Modified version of pty.spawn with terminal size support.
def _spawn(shell, master_read):
    """Create a spawned process.

    Modified version of pty.spawn with terminal size support.

    """
    pid, master_fd = pty.fork()

    if pid == pty.CHILD:
        os.execlp(shell, shell)

    try:
        mode = tty.tcgetattr(pty.STDIN_FILENO)
        tty.setraw(pty.STDIN_FILENO)
        restore = True
    except tty.error:  # This is the same as termios.error
        restore = False

    _set_pty_size(master_fd)
    signal.signal(signal.SIGWINCH, lambda *_: _set_pty_size(master_fd))

    try:
        pty._copy(master_fd, master_read, pty._read)
    except OSError:
        if restore:
            tty.tcsetattr(pty.STDIN_FILENO, tty.TCSAFLUSH, mode)

    os.close(master_fd)
    return os.waitpid(pid, 0)[1]
Logs shell output to the `output`. Works like unix script command with `-f` flag.
def shell_logger(output):
    """Logs shell output to the `output`.

    Works like unix script command with `-f` flag.

    """
    if not os.environ.get('SHELL'):
        logs.warn("Shell logger doesn't support your platform.")
        sys.exit(1)

    fd = os.open(output, os.O_CREAT | os.O_TRUNC | os.O_RDWR)
    os.write(fd, b'\x00' * const.LOG_SIZE_IN_BYTES)
    buffer = mmap.mmap(fd, const.LOG_SIZE_IN_BYTES, mmap.MAP_SHARED, mmap.PROT_WRITE)
    return_code = _spawn(os.environ['SHELL'], partial(_read, buffer))

    sys.exit(return_code)
Get output of the script. :param script: Console script. :type script: str :param expanded: Console script with expanded aliases. :type expanded: str :rtype: str
def get_output(script, expanded):
    """Get output of the script.

    :param script: Console script.
    :type script: str
    :param expanded: Console script with expanded aliases.
    :type expanded: str
    :rtype: str

    """
    if shell_logger.is_available():
        return shell_logger.get_output(script)

    if settings.instant_mode:
        return read_log.get_output(script)
    else:
        return rerun.get_output(script, expanded)
Adds arguments to parser.
def _add_arguments(self):
    """Adds arguments to parser."""
    self._parser.add_argument(
        '-v', '--version',
        action='store_true',
        help="show program's version number and exit")
    self._parser.add_argument(
        '-a', '--alias',
        nargs='?',
        const=get_alias(),
        help='[custom-alias-name] prints alias for current shell')
    self._parser.add_argument(
        '-l', '--shell-logger',
        action='store',
        help='log shell output to the file')
    self._parser.add_argument(
        '--enable-experimental-instant-mode',
        action='store_true',
        help='enable experimental instant mode, use at your own risk')
    self._parser.add_argument(
        '-h', '--help',
        action='store_true',
        help='show this help message and exit')
    self._add_conflicting_arguments()
    self._parser.add_argument(
        '-d', '--debug',
        action='store_true',
        help='enable debug output')
    self._parser.add_argument(
        '--force-command',
        action='store',
        help=SUPPRESS)
    self._parser.add_argument(
        'command',
        nargs='*',
        help='command that should be fixed')
It's too dangerous to use `-y` and `-r` together.
def _add_conflicting_arguments(self):
    """It's too dangerous to use `-y` and `-r` together."""
    group = self._parser.add_mutually_exclusive_group()
    group.add_argument(
        '-y', '--yes', '--yeah',
        action='store_true',
        help='execute fixed command without confirmation')
    group.add_argument(
        '-r', '--repeat',
        action='store_true',
        help='repeat on failure')
Prepares arguments by: - removing the placeholder and moving arguments after it to the beginning, so we can distinguish our arguments from the `command`'s; - adding `--` before `command`, so our parser ignores arguments of `command`.
def _prepare_arguments(self, argv):
    """Prepares arguments by:

    - removing the placeholder and moving arguments after it to the
      beginning, so we can distinguish our arguments from the `command`'s;

    - adding `--` before `command`, so our parser ignores arguments
      of `command`.

    """
    if ARGUMENT_PLACEHOLDER in argv:
        index = argv.index(ARGUMENT_PLACEHOLDER)
        return argv[index + 1:] + ['--'] + argv[:index]
    elif argv and not argv[0].startswith('-') and argv[0] != '--':
        return ['--'] + argv
    else:
        return argv
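A sketch of the transformation, assuming ARGUMENT_PLACEHOLDER is a sentinel string inserted by the shell alias (the exact value below is an assumption):

    ARGUMENT_PLACEHOLDER = 'THEFUCK_ARGUMENT_PLACEHOLDER'  # assumed sentinel

    # Arguments after the placeholder are ours; everything before is the command:
    argv = ['git', 'brnch', ARGUMENT_PLACEHOLDER, '-y']
    # _prepare_arguments(argv) -> ['-y', '--', 'git', 'brnch']

    # A plain command gets '--' prepended so the parser ignores its flags:
    argv = ['git', 'brnch']
    # _prepare_arguments(argv) -> ['--', 'git', 'brnch']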
Get custom npm scripts.
def get_scripts():
    """Get custom npm scripts."""
    proc = Popen(['npm', 'run-script'], stdout=PIPE)
    should_yield = False
    for line in proc.stdout.readlines():
        line = line.decode()
        if 'available via `npm run-script`:' in line:
            should_yield = True
            continue

        if should_yield and re.match(r'^ [^ ]+', line):
            yield line.strip().split(' ')[0]
Fills `settings` with values from `settings.py` and env.
def init(self, args=None):
    """Fills `settings` with values from `settings.py` and env."""
    from .logs import exception

    self._setup_user_dir()
    self._init_settings_file()

    try:
        self.update(self._settings_from_file())
    except Exception:
        exception("Can't load settings from file", sys.exc_info())

    try:
        self.update(self._settings_from_env())
    except Exception:
        exception("Can't load settings from env", sys.exc_info())

    self.update(self._settings_from_args(args))
Returns Path object representing the user config resource
def _get_user_dir_path(self):
    """Returns Path object representing the user config resource"""
    xdg_config_home = os.environ.get('XDG_CONFIG_HOME', '~/.config')
    user_dir = Path(xdg_config_home, 'thefuck').expanduser()
    legacy_user_dir = Path('~', '.thefuck').expanduser()

    # For backward compatibility use legacy '~/.thefuck' if it exists:
    if legacy_user_dir.is_dir():
        warn(u'Config path {} is deprecated. Please move to {}'.format(
            legacy_user_dir, user_dir))
        return legacy_user_dir
    else:
        return user_dir
Sets up the user config dir, creating it when it doesn't exist.
def _setup_user_dir(self):
    """Sets up the user config dir, creating it when it doesn't exist."""
    user_dir = self._get_user_dir_path()

    rules_dir = user_dir.joinpath('rules')
    if not rules_dir.is_dir():
        rules_dir.mkdir(parents=True)
    self.user_dir = user_dir
Loads settings from file.
def _settings_from_file(self):
    """Loads settings from file."""
    settings = load_source(
        'settings', text_type(self.user_dir.joinpath('settings.py')))
    return {key: getattr(settings, key)
            for key in const.DEFAULT_SETTINGS.keys()
            if hasattr(settings, key)}
Transforms rules list from env-string to python.
def _rules_from_env(self, val):
    """Transforms rules list from env-string to python."""
    val = val.split(':')
    if 'DEFAULT_RULES' in val:
        val = const.DEFAULT_RULES + [rule for rule in val
                                     if rule != 'DEFAULT_RULES']
    return val
Gets priority pairs from env.
def _priority_from_env(self, val):
    """Gets priority pairs from env."""
    for part in val.split(':'):
        try:
            rule, priority = part.split('=')
            yield rule, int(priority)
        except ValueError:
            continue
Transforms env-strings to python.
def _val_from_env(self, env, attr):
    """Transforms env-strings to python."""
    val = os.environ[env]
    if attr in ('rules', 'exclude_rules'):
        return self._rules_from_env(val)
    elif attr == 'priority':
        return dict(self._priority_from_env(val))
    elif attr in ('wait_command', 'history_limit', 'wait_slow_command',
                  'num_close_matches'):
        return int(val)
    elif attr in ('require_confirmation', 'no_colors', 'debug',
                  'alter_history', 'instant_mode'):
        return val.lower() == 'true'
    elif attr == 'slow_commands':
        return val.split(':')
    else:
        return val
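Illustrative environment values and the Python objects they become, assuming const.ENV_TO_ATTR maps names such as THEFUCK_RULES to the attributes handled above (the exact variable names are an assumption):

    # THEFUCK_RULES='DEFAULT_RULES:rm_root'  -> const.DEFAULT_RULES + ['rm_root']
    # THEFUCK_PRIORITY='no_command=9999'     -> {'no_command': 9999}
    # THEFUCK_HISTORY_LIMIT='2000'           -> 2000
    # THEFUCK_REQUIRE_CONFIRMATION='true'    -> True
    # THEFUCK_SLOW_COMMANDS='lein:gradle'    -> ['lein', 'gradle']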
Loads settings from env.
def _settings_from_env(self):
    """Loads settings from env."""
    return {attr: self._val_from_env(env, attr)
            for env, attr in const.ENV_TO_ATTR.items()
            if env in os.environ}
Loads settings from args.
def _settings_from_args(self, args):
    """Loads settings from args."""
    if not args:
        return {}

    from_args = {}
    if args.yes:
        from_args['require_confirmation'] = not args.yes
    if args.debug:
        from_args['debug'] = args.debug
    if args.repeat:
        from_args['repeat'] = args.repeat
    return from_args
When the argument order is wrong, the first argument will be the destination.
def _get_destination(script_parts):
    """When the argument order is wrong, the first argument will be the destination."""
    for part in script_parts:
        if part not in {'ln', '-s', '--symbolic'} and os.path.exists(part):
            return part
Removes sudo before calling fn and adds it after.
def sudo_support(fn, command):
    """Removes sudo before calling fn and adds it after."""
    if not command.script.startswith('sudo '):
        return fn(command)

    result = fn(command.update(script=command.script[5:]))

    if result and isinstance(result, six.string_types):
        return u'sudo {}'.format(result)
    elif isinstance(result, list):
        return [u'sudo {}'.format(x) for x in result]
    else:
        return result
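A usage sketch: rules apply this via the project's `decorator` helper, so a rule's `match` sees the script with `sudo ` stripped and any returned fix gets it prepended again (the rule body is illustrative):

    @sudo_support
    def match(command):
        # Receives 'apt-get instal vim' even when the user typed
        # 'sudo apt-get instal vim'
        return command.script_parts and 'not found' in command.output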
Tries to kill the process; otherwise just logs a debug message. The process will be killed when thefuck terminates. :type proc: Process
def _kill_process(proc):
    """Tries to kill the process; otherwise just logs a debug message.
    The process will be killed when thefuck terminates.

    :type proc: Process

    """
    try:
        proc.kill()
    except AccessDenied:
        logs.debug(u'Rerun: process PID {} ({}) could not be terminated'.format(
            proc.pid, proc.exe()))
Returns `True` if we can get output of the command within the `settings.wait_command` time. The command will be killed if it doesn't finish in time. :type popen: Popen :rtype: bool
def _wait_output(popen, is_slow):
    """Returns `True` if we can get output of the command within the
    `settings.wait_command` time.

    The command will be killed if it doesn't finish in time.

    :type popen: Popen
    :rtype: bool

    """
    proc = Process(popen.pid)
    try:
        proc.wait(settings.wait_slow_command if is_slow
                  else settings.wait_command)
        return True
    except TimeoutExpired:
        for child in proc.children(recursive=True):
            _kill_process(child)
        _kill_process(proc)
        return False
Runs the script and obtains stdout/stderr. :param script: Console script. :type script: str :param expanded: Console script with expanded aliases. :type expanded: str :rtype: str | None
def get_output(script, expanded):
    """Runs the script and obtains stdout/stderr.

    :type script: str
    :type expanded: str
    :rtype: str | None

    """
    env = dict(os.environ)
    env.update(settings.env)

    is_slow = shlex.split(expanded) in settings.slow_commands
    # The original format string was missing the third `{}` placeholder:
    with logs.debug_time(u'Call: {}; with env: {}; is slow: {}'.format(
            script, env, is_slow)):
        result = Popen(expanded, shell=True, stdin=PIPE,
                       stdout=PIPE, stderr=STDOUT, env=env)
        if _wait_output(result, is_slow):
            output = result.stdout.read().decode('utf-8')
            logs.debug(u'Received output: {}'.format(output))
            return output
        else:
            logs.debug(u'Execution timed out!')
            return None
Reads script output from log. :type script: str :rtype: str | None
def get_output(script):
    """Reads script output from log.

    :type script: str
    :rtype: str | None

    """
    if six.PY2:
        logs.warn('Experimental instant mode is Python 3+ only')
        return None

    if 'THEFUCK_OUTPUT_LOG' not in os.environ:
        logs.warn("Output log isn't specified")
        return None

    if const.USER_COMMAND_MARK not in os.environ.get('PS1', ''):
        logs.warn(
            "PS1 doesn't contain user command mark, please ensure "
            "that PS1 is not changed after The Fuck alias initialization")
        return None

    try:
        with logs.debug_time(u'Read output from log'):
            fd = os.open(os.environ['THEFUCK_OUTPUT_LOG'], os.O_RDONLY)
            buffer = mmap.mmap(fd, const.LOG_SIZE_IN_BYTES, mmap.MAP_SHARED, mmap.PROT_READ)
            _skip_old_lines(buffer)
            lines = _get_output_lines(script, buffer)
            output = '\n'.join(lines).strip()
            logs.debug(u'Received output: {}'.format(output))
        return output
    except OSError:
        logs.warn("Can't read output log")
        return None
    except ScriptNotInLog:
        logs.warn("Script not found in output log")
        return None
Gets the packages that provide the given command using `pkgfile`. If the command is of the form `sudo foo`, searches for the `foo` command instead.
def get_pkgfile(command):
    """Gets the packages that provide the given command using `pkgfile`.

    If the command is of the form `sudo foo`, searches for the `foo` command
    instead.
    """
    try:
        command = command.strip()

        if command.startswith('sudo '):
            command = command[5:]

        command = command.split(" ")[0]

        packages = subprocess.check_output(
            ['pkgfile', '-b', '-v', command],
            universal_newlines=True, stderr=utils.DEVNULL
        ).splitlines()

        return [package.split()[0] for package in packages]
    except subprocess.CalledProcessError as err:
        if err.returncode == 1 and err.output == "":
            return []
        else:
            raise err
Returns a list of the child directories of the given parent directory
def _get_sub_dirs(parent):
    """Returns a list of the child directories of the given parent directory"""
    return [child for child in os.listdir(parent)
            if os.path.isdir(os.path.join(parent, child))]
Attempt to rebuild the path string by spellchecking the directories. If it fails (i.e. no directories are a close enough match), then it defaults to the rules of cd_mkdir. Change sensitivity by changing MAX_ALLOWED_DIFF. Default value is 0.6
def get_new_command(command):
    """Attempt to rebuild the path string by spellchecking the directories.

    If it fails (i.e. no directories are a close enough match), then it
    defaults to the rules of cd_mkdir. Change sensitivity by changing
    MAX_ALLOWED_DIFF. Default value is 0.6.
    """
    dest = command.script_parts[1].split(os.sep)
    if dest[-1] == '':
        dest = dest[:-1]

    if dest[0] == '':
        cwd = os.sep
        dest = dest[1:]
    elif six.PY2:
        cwd = os.getcwdu()
    else:
        cwd = os.getcwd()
    for directory in dest:
        if directory == ".":
            continue
        elif directory == "..":
            cwd = os.path.split(cwd)[0]
            continue
        best_matches = get_close_matches(directory, _get_sub_dirs(cwd),
                                         cutoff=MAX_ALLOWED_DIFF)
        if best_matches:
            cwd = os.path.join(cwd, best_matches[0])
        else:
            return cd_mkdir.get_new_command(command)
    return u'cd "{0}"'.format(cwd)
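A standalone sketch of the fuzzy matching this rule relies on, using only the standard library:

    from difflib import get_close_matches

    subdirs = ['Desktop', 'Documents', 'Downloads']
    print(get_close_matches('Desktp', subdirs, n=1, cutoff=0.6))  # ['Desktop']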
Returns new command with replaced fields. :rtype: Command
def update(self, **kwargs):
    """Returns new command with replaced fields.

    :rtype: Command

    """
    kwargs.setdefault('script', self.script)
    kwargs.setdefault('output', self.output)
    return Command(**kwargs)
Creates instance of `Command` from a list of script parts. :type raw_script: [basestring] :rtype: Command :raises: EmptyCommand
def from_raw_script(cls, raw_script):
    """Creates instance of `Command` from a list of script parts.

    :type raw_script: [basestring]
    :rtype: Command
    :raises: EmptyCommand

    """
    script = format_raw_script(raw_script)
    if not script:
        raise EmptyCommand

    expanded = shell.from_shell(script)
    output = get_output(script, expanded)
    return cls(expanded, output)
Creates rule instance from path. :type path: pathlib.Path :rtype: Rule
def from_path(cls, path):
    """Creates rule instance from path.

    :type path: pathlib.Path
    :rtype: Rule

    """
    name = path.name[:-3]
    with logs.debug_time(u'Importing rule: {};'.format(name)):
        rule_module = load_source(name, str(path))
        priority = getattr(rule_module, 'priority', DEFAULT_PRIORITY)
    return cls(name, rule_module.match,
               rule_module.get_new_command,
               getattr(rule_module, 'enabled_by_default', True),
               getattr(rule_module, 'side_effect', None),
               settings.priority.get(name, priority),
               getattr(rule_module, 'requires_output', True))
Returns `True` when rule enabled. :rtype: bool
def is_enabled(self):
    """Returns `True` when rule enabled.

    :rtype: bool

    """
    if self.name in settings.exclude_rules:
        return False
    elif self.name in settings.rules:
        return True
    elif self.enabled_by_default and ALL_ENABLED in settings.rules:
        return True
    else:
        return False
Returns `True` if rule matches the command. :type command: Command :rtype: bool
def is_match(self, command):
    """Returns `True` if rule matches the command.

    :type command: Command
    :rtype: bool

    """
    if command.output is None and self.requires_output:
        return False

    try:
        with logs.debug_time(u'Trying rule: {};'.format(self.name)):
            if self.match(command):
                return True
    except Exception:
        logs.rule_failed(self, sys.exc_info())
Returns generator with corrected commands. :type command: Command :rtype: Iterable[CorrectedCommand]
def get_corrected_commands(self, command):
    """Returns generator with corrected commands.

    :type command: Command
    :rtype: Iterable[CorrectedCommand]

    """
    new_commands = self.get_new_command(command)
    if not isinstance(new_commands, list):
        new_commands = (new_commands,)
    for n, new_command in enumerate(new_commands):
        yield CorrectedCommand(script=new_command,
                               side_effect=self.side_effect,
                               priority=(n + 1) * self.priority)
Returns the fixed command's script. When `settings.repeat` is `True`, appends a second attempt to run fuck in case the fixed command fails again.
def _get_script(self):
    """Returns the fixed command's script.

    When `settings.repeat` is `True`, appends a second attempt to run fuck
    in case the fixed command fails again.

    """
    if settings.repeat:
        repeat_fuck = '{} --repeat {}--force-command {}'.format(
            get_alias(),
            '--debug ' if settings.debug else '',
            shell.quote(self.script))
        return shell.or_(self.script, repeat_fuck)
    else:
        return self.script
Runs command from rule for passed command. :type old_cmd: Command
def run(self, old_cmd):
    """Runs command from rule for passed command.

    :type old_cmd: Command

    """
    if self.side_effect:
        self.side_effect(old_cmd, self.script)
    if settings.alter_history:
        shell.put_to_history(self.script)
    # This depends on correct setting of PYTHONIOENCODING by the alias:
    logs.debug(u'PYTHONIOENCODING: {}'.format(
        os.environ.get('PYTHONIOENCODING', '!!not-set!!')))

    print(self._get_script())
Returns parent process pid.
def _get_shell_pid():
    """Returns parent process pid."""
    proc = Process(os.getpid())

    try:
        return proc.parent().pid
    except TypeError:
        return proc.parent.pid
Records shell pid to tracker file.
def _record_first_run():
    """Records shell pid to tracker file."""
    info = {'pid': _get_shell_pid(),
            'time': time.time()}

    mode = 'wb' if six.PY2 else 'w'
    with _get_not_configured_usage_tracker_path().open(mode) as tracker:
        json.dump(info, tracker)
Returns `True` when we know that `fuck` was called a second time.
def _is_second_run():
    """Returns `True` when we know that `fuck` was called a second time."""
    tracker_path = _get_not_configured_usage_tracker_path()
    if not tracker_path.exists():
        return False

    current_pid = _get_shell_pid()
    with tracker_path.open('r') as tracker:
        try:
            info = json.load(tracker)
        except ValueError:
            return False

        if not (isinstance(info, dict) and info.get('pid') == current_pid):
            return False

        return (_get_previous_command() == 'fuck' or
                time.time() - info.get('time', 0) < const.CONFIGURATION_TIMEOUT)
Returns `True` when the alias is already in the shell config.
def _is_already_configured(configuration_details):
    """Returns `True` when the alias is already in the shell config."""
    path = Path(configuration_details.path).expanduser()
    with path.open('r') as shell_config:
        return configuration_details.content in shell_config.read()
Adds alias to shell config.
def _configure(configuration_details):
    """Adds alias to shell config."""
    path = Path(configuration_details.path).expanduser()
    with path.open('a') as shell_config:
        shell_config.write(u'\n')
        shell_config.write(configuration_details.content)
        shell_config.write(u'\n')
Shows useful information about how to configure the alias on the first run and configures it automatically on the second. It'll only be visible when the user types fuck and the alias isn't configured.
def main():
    """Shows useful information about how to configure the alias on the
    first run and configures it automatically on the second.

    It'll only be visible when the user types fuck and the alias isn't
    configured.

    """
    settings.init()
    configuration_details = shell.how_to_configure()

    if (
            configuration_details and
            configuration_details.can_configure_automatically
    ):
        if _is_already_configured(configuration_details):
            logs.already_configured(configuration_details)
            return
        elif _is_second_run():
            _configure(configuration_details)
            logs.configured_successfully(configuration_details)
            return
        else:
            _record_first_run()

    logs.how_to_configure_alias(configuration_details)
Caches previous calls to the function.
def memoize(fn):
    """Caches previous calls to the function."""
    memo = {}

    @wraps(fn)
    def wrapper(*args, **kwargs):
        if not memoize.disabled:
            key = pickle.dumps((args, kwargs))
            if key not in memo:
                memo[key] = fn(*args, **kwargs)
            value = memo[key]
        else:
            # Memoize is disabled, call the function
            value = fn(*args, **kwargs)

        return value

    return wrapper
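A usage sketch; note that `memoize.disabled` must be set before the first call (the real module initializes this flag, assumed here):

    memoize.disabled = False

    @memoize
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    print(fib(30))  # repeated sub-calls hit the memo dict keyed by pickled args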
Adds default values to settings if they are not present. Usage: @default_settings({'apt': '/usr/bin/apt'}) def match(command): print(settings.apt)
def default_settings(params):
    """Adds default values to settings if they are not present.

    Usage:

        @default_settings({'apt': '/usr/bin/apt'})
        def match(command):
            print(settings.apt)

    """
    def _default_settings(fn, command):
        for k, w in params.items():
            settings.setdefault(k, w)
        return fn(command)
    return decorator(_default_settings)
Returns closest match or just first from possibilities.
def get_closest(word, possibilities, cutoff=0.6, fallback_to_first=True):
    """Returns closest match or just first from possibilities."""
    possibilities = list(possibilities)
    try:
        return difflib_get_close_matches(word, possibilities, 1, cutoff)[0]
    except IndexError:
        if fallback_to_first:
            return possibilities[0]
Overrides `difflib.get_close_matches` to control the argument `n`.
def get_close_matches(word, possibilities, n=None, cutoff=0.6):
    """Overrides `difflib.get_close_matches` to control the argument `n`."""
    if n is None:
        n = settings.num_close_matches
    return difflib_get_close_matches(word, possibilities, n, cutoff)
Replaces command line argument.
def replace_argument(script, from_, to):
    """Replaces command line argument."""
    replaced_in_the_end = re.sub(u' {}$'.format(re.escape(from_)),
                                 u' {}'.format(to), script, count=1)
    if replaced_in_the_end != script:
        return replaced_in_the_end
    else:
        return script.replace(
            u' {} '.format(from_), u' {} '.format(to), 1)
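Illustrative behaviour, covering both branches above (the trailing-argument match first, then the first inner occurrence):

    replace_argument('git brnch', 'brnch', 'branch')     # 'git branch'
    replace_argument('git brnch -v', 'brnch', 'branch')  # 'git branch -v'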
Helper for *_no_command rules.
def replace_command(command, broken, matched):
    """Helper for *_no_command rules."""
    new_cmds = get_close_matches(broken, matched, cutoff=0.1)
    return [replace_argument(command.script, broken, new_cmd.strip())
            for new_cmd in new_cmds]
Returns `True` if command is call to one of passed app names.
def is_app(command, *app_names, **kwargs):
    """Returns `True` if command is call to one of passed app names."""
    at_least = kwargs.pop('at_least', 0)
    if kwargs:
        raise TypeError("got an unexpected keyword argument '{}'".format(kwargs.keys()))

    if len(command.script_parts) > at_least:
        return command.script_parts[0] in app_names

    return False
Specifies that the matching script is for one of the given app names.
def for_app(*app_names, **kwargs):
    """Specifies that the matching script is for one of the given app names."""
    def _for_app(fn, command):
        if is_app(command, *app_names, **kwargs):
            return fn(command)
        else:
            return False

    return decorator(_for_app)
Caches function result in a temporary file. The cache expires when the modification date of any file in `depends_on` changes. Only functions should be wrapped in `cache`, not methods.
def cache(*depends_on):
    """Caches function result in a temporary file.

    The cache expires when the modification date of any file in `depends_on`
    changes.

    Only functions should be wrapped in `cache`, not methods.

    """
    def cache_decorator(fn):
        @memoize
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if cache.disabled:
                return fn(*args, **kwargs)
            else:
                return _cache.get_value(fn, depends_on, args, kwargs)

        return wrapper

    return cache_decorator
Creates single script from a list of script parts. :type raw_script: [basestring] :rtype: basestring
def format_raw_script(raw_script):
    """Creates single script from a list of script parts.

    :type raw_script: [basestring]
    :rtype: basestring

    """
    if six.PY2:
        script = ' '.join(arg.decode('utf-8') for arg in raw_script)
    else:
        script = ' '.join(raw_script)

    return script.strip()
Decides actions given observation information, and takes them in the environment. :param brain_info: A dictionary of brain names and BrainInfo from environment. :return: an ActionInfo containing action, memories, values and an object to be passed to add experiences
def get_action(self, brain_info: BrainInfo) -> ActionInfo:
    """
    Decides actions given observation information, and takes them in the environment.
    :param brain_info: A dictionary of brain names and BrainInfo from environment.
    :return: an ActionInfo containing action, memories, values and an object
    to be passed to add experiences
    """
    if len(brain_info.agents) == 0:
        return ActionInfo([], [], [], None, None)

    run_out = self.evaluate(brain_info)

    return ActionInfo(
        action=run_out.get('action'),
        memory=run_out.get('memory_out'),
        text=None,
        value=run_out.get('value'),
        outputs=run_out
    )
Executes model. :param feed_dict: Input dictionary mapping nodes to input data. :param out_dict: Output dictionary mapping names to nodes. :return: Dictionary mapping names to output data.
def _execute_model(self, feed_dict, out_dict):
    """
    Executes model.
    :param feed_dict: Input dictionary mapping nodes to input data.
    :param out_dict: Output dictionary mapping names to nodes.
    :return: Dictionary mapping names to output data.
    """
    network_out = self.sess.run(list(out_dict.values()), feed_dict=feed_dict)
    run_out = dict(zip(list(out_dict.keys()), network_out))
    return run_out
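A hedged call-pattern sketch; the model attribute names here are placeholders, not the library's actual tensor names:

    feed_dict = {self.model.batch_size: 1,           # hypothetical placeholder
                 self.model.vector_in: observation}  # hypothetical placeholder
    out_dict = {'action': self.model.output,         # hypothetical output node
                'value': self.model.value}
    run_out = self._execute_model(feed_dict, out_dict)
    action = run_out['action']  # keys mirror out_dict's keys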
Gets current model step. :return: current model step.
def get_current_step(self):
    """
    Gets current model step.
    :return: current model step.
    """
    step = self.sess.run(self.model.global_step)
    return step
Saves the model :param steps: The number of steps the model was trained for :return:
def save_model(self, steps):
    """
    Saves the model
    :param steps: The number of steps the model was trained for
    :return:
    """
    with self.graph.as_default():
        last_checkpoint = self.model_path + '/model-' + str(steps) + '.cptk'
        self.saver.save(self.sess, last_checkpoint)
        tf.train.write_graph(self.graph, self.model_path,
                             'raw_graph_def.pb', as_text=False)
Exports latest saved model to .nn format for Unity embedding.
def export_model(self):
    """
    Exports latest saved model to .nn format for Unity embedding.
    """
    with self.graph.as_default():
        target_nodes = ','.join(self._process_graph())
        ckpt = tf.train.get_checkpoint_state(self.model_path)
        freeze_graph.freeze_graph(
            input_graph=self.model_path + '/raw_graph_def.pb',
            input_binary=True,
            input_checkpoint=ckpt.model_checkpoint_path,
            output_node_names=target_nodes,
            output_graph=(self.model_path + '/frozen_graph_def.pb'),
            clear_devices=True, initializer_nodes='', input_saver='',
            restore_op_name='save/restore_all',
            filename_tensor_name='save/Const:0')

    tf2bc.convert(self.model_path + '/frozen_graph_def.pb', self.model_path + '.nn')
    logger.info('Exported ' + self.model_path + '.nn file')
Gets the list of the output nodes present in the graph for inference :return: list of node names
def _process_graph(self):
    """
    Gets the list of the output nodes present in the graph for inference
    :return: list of node names
    """
    all_nodes = [x.name for x in self.graph.as_graph_def().node]
    nodes = [x for x in all_nodes if x in self.possible_output_nodes]
    logger.info('List of nodes to export for brain: ' + self.brain.brain_name)
    for n in nodes:
        logger.info('\t' + n)
    return nodes
Resets all the local buffers.
def reset_local_buffers(self):
    """
    Resets all the local buffers.
    """
    agent_ids = list(self.keys())
    for k in agent_ids:
        self[k].reset_agent()
Appends the buffer of an agent to the update buffer. :param agent_id: The id of the agent whose data will be appended :param key_list: The fields that must be added. If None: all fields will be appended. :param batch_size: The number of elements that must be appended. If None: All of them will be. :param training_length: The length of the samples that must be appended. If None: only takes one element.
def append_update_buffer(self, agent_id, key_list=None, batch_size=None, training_length=None):
    """
    Appends the buffer of an agent to the update buffer.
    :param agent_id: The id of the agent whose data will be appended
    :param key_list: The fields that must be added. If None: all fields will be appended.
    :param batch_size: The number of elements that must be appended. If None: All of them will be.
    :param training_length: The length of the samples that must be appended. If None: only takes one element.
    """
    if key_list is None:
        key_list = self[agent_id].keys()
    if not self[agent_id].check_length(key_list):
        raise BufferException("The fields {0} for agent {1} were not of the same length"
                              .format(key_list, agent_id))
    for field_key in key_list:
        self.update_buffer[field_key].extend(
            self[agent_id][field_key].get_batch(batch_size=batch_size,
                                                training_length=training_length)
        )
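A usage sketch with hypothetical field names; `buffer` stands for an instance of this buffer class:

    # Append the last 32 'actions' and 'rewards' entries recorded for one agent
    # into the shared update buffer used for training:
    buffer.append_update_buffer('agent_0',
                                key_list=['actions', 'rewards'],
                                batch_size=32,
                                training_length=1)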
Appends the buffer of all agents to the update buffer. :param key_list: The fields that must be added. If None: all fields will be appended. :param batch_size: The number of elements that must be appended. If None: All of them will be. :param training_length: The length of the samples that must be appended. If None: only takes one element.
def append_all_agent_batch_to_update_buffer(self, key_list=None, batch_size=None, training_length=None):
    """
    Appends the buffer of all agents to the update buffer.
    :param key_list: The fields that must be added. If None: all fields will be appended.
    :param batch_size: The number of elements that must be appended. If None: All of them will be.
    :param training_length: The length of the samples that must be appended. If None: only takes one element.
    """
    for agent_id in self.keys():
        self.append_update_buffer(agent_id, key_list, batch_size, training_length)
Launches training session. :param process_queue: Queue used to send signal back to main. :param sub_id: Unique id for training session. :param run_seed: Random seed used for training. :param run_options: Command line arguments for training.
def run_training(sub_id: int, run_seed: int, run_options, process_queue):
    """
    Launches training session.
    :param process_queue: Queue used to send signal back to main.
    :param sub_id: Unique id for training session.
    :param run_seed: Random seed used for training.
    :param run_options: Command line arguments for training.
    """
    # Docker Parameters
    docker_target_name = (run_options['--docker-target-name']
                          if run_options['--docker-target-name'] != 'None' else None)

    # General parameters
    env_path = (run_options['--env']
                if run_options['--env'] != 'None' else None)
    run_id = run_options['--run-id']
    load_model = run_options['--load']
    train_model = run_options['--train']
    save_freq = int(run_options['--save-freq'])
    keep_checkpoints = int(run_options['--keep-checkpoints'])
    base_port = int(run_options['--base-port'])
    num_envs = int(run_options['--num-envs'])
    curriculum_folder = (run_options['--curriculum']
                         if run_options['--curriculum'] != 'None' else None)
    lesson = int(run_options['--lesson'])
    fast_simulation = not bool(run_options['--slow'])
    no_graphics = run_options['--no-graphics']
    trainer_config_path = run_options['<trainer-config-path>']

    # Recognize and use docker volume if one is passed as an argument
    if not docker_target_name:
        model_path = './models/{run_id}-{sub_id}'.format(run_id=run_id, sub_id=sub_id)
        summaries_dir = './summaries'
    else:
        trainer_config_path = \
            '/{docker_target_name}/{trainer_config_path}'.format(
                docker_target_name=docker_target_name,
                trainer_config_path=trainer_config_path)
        if curriculum_folder is not None:
            curriculum_folder = \
                '/{docker_target_name}/{curriculum_folder}'.format(
                    docker_target_name=docker_target_name,
                    curriculum_folder=curriculum_folder)
        model_path = '/{docker_target_name}/models/{run_id}-{sub_id}'.format(
            docker_target_name=docker_target_name,
            run_id=run_id,
            sub_id=sub_id)
        summaries_dir = '/{docker_target_name}/summaries'.format(
            docker_target_name=docker_target_name)

    trainer_config = load_config(trainer_config_path)
    env_factory = create_environment_factory(
        env_path,
        docker_target_name,
        no_graphics,
        run_seed,
        base_port + (sub_id * num_envs)
    )
    env = SubprocessUnityEnvironment(env_factory, num_envs)
    maybe_meta_curriculum = try_create_meta_curriculum(curriculum_folder, env)

    # Create controller and begin training.
    tc = TrainerController(model_path, summaries_dir, run_id + '-' + str(sub_id),
                           save_freq, maybe_meta_curriculum,
                           load_model, train_model,
                           keep_checkpoints, lesson, env.external_brains,
                           run_seed, fast_simulation)

    # Signal that environment has been launched.
    process_queue.put(True)

    # Begin training
    tc.start_learning(env, trainer_config)
Get an action using this trainer's current policy. :param curr_info: Current BrainInfo. :return: The ActionInfo given by the policy given the BrainInfo.
def get_action(self, curr_info: BrainInfo) -> ActionInfo:
    """
    Get an action using this trainer's current policy.
    :param curr_info: Current BrainInfo.
    :return: The ActionInfo given by the policy given the BrainInfo.
    """
    self.trainer_metrics.start_experience_collection_timer()
    action = self.policy.get_action(curr_info)
    self.trainer_metrics.end_experience_collection_timer()
    return action
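A sketch of where get_action sits in the collection loop; the ActionInfo attribute names (action, memory, value) are assumptions used for illustration.

curr_info = all_brain_info[brain_name]
action_info = trainer.get_action(curr_info)  # timed experience collection
new_all_info = env.step(
    vector_action={brain_name: action_info.action},
    memory={brain_name: action_info.memory},
    value={brain_name: action_info.value})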
Saves text to TensorBoard. Note: Only works on TensorFlow r1.2 or above. :param key: The name of the text. :param input_dict: A dictionary that will be displayed in a table on TensorBoard.
def write_tensorboard_text(self, key, input_dict):
    """
    Saves text to TensorBoard.
    Note: Only works on TensorFlow r1.2 or above.
    :param key: The name of the text.
    :param input_dict: A dictionary that will be displayed in a table on TensorBoard.
    """
    try:
        with tf.Session() as sess:
            s_op = tf.summary.text(
                key,
                tf.convert_to_tensor([[str(x), str(input_dict[x])] for x in input_dict])
            )
            s = sess.run(s_op)
            self.summary_writer.add_summary(s, self.get_step)
    except Exception:
        # tf.summary.text is unavailable before TensorFlow r1.2.
        LOGGER.info(
            "Cannot write text summary for TensorBoard. "
            "TensorFlow version must be r1.2 or above.")
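An illustrative call, e.g. logging a trainer's hyperparameters once at startup; the key name and dictionary contents below are invented.

trainer_parameters = {'learning_rate': 3.0e-4, 'batch_size': 1024, 'gamma': 0.99}
self.write_tensorboard_text('Hyperparameters', trainer_parameters)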
A dict from brain name to the brain's curriculum's lesson number.
@property
def lesson_nums(self):
    """A dict from brain name to the brain's curriculum's lesson number."""
    lesson_nums = {}
    for brain_name, curriculum in self.brains_to_curriculums.items():
        lesson_nums[brain_name] = curriculum.lesson_num
    return lesson_nums
Attempts to increment the lessons of all the curriculums in this MetaCurriculum. Note that calling this method does not guarantee the lesson of a curriculum will increment. The lesson of a curriculum will only increment if the specified measure threshold defined in the curriculum has been reached and the minimum number of episodes in the lesson have been completed. Args: measure_vals (dict): A dict of brain name to measure value. reward_buff_sizes (dict): A dict of brain names to the size of their corresponding reward buffers. Returns: A dict from brain name to whether that brain's lesson number was incremented.
def increment_lessons(self, measure_vals, reward_buff_sizes=None):
    """Attempts to increment the lessons of all the curriculums in this
    MetaCurriculum. Note that calling this method does not guarantee the
    lesson of a curriculum will increment. The lesson of a curriculum will
    only increment if the specified measure threshold defined in the
    curriculum has been reached and the minimum number of episodes in the
    lesson have been completed.

    Args:
        measure_vals (dict): A dict of brain name to measure value.
        reward_buff_sizes (dict): A dict of brain names to the size of their
            corresponding reward buffers.

    Returns:
        A dict from brain name to whether that brain's lesson number was
        incremented.
    """
    ret = {}
    if reward_buff_sizes:
        # Only consider brains whose reward buffers have collected enough
        # episodes to make the measure meaningful.
        for brain_name, buff_size in reward_buff_sizes.items():
            if self._lesson_ready_to_increment(brain_name, buff_size):
                measure_val = measure_vals[brain_name]
                ret[brain_name] = (self.brains_to_curriculums[brain_name]
                                   .increment_lesson(measure_val))
    else:
        for brain_name, measure_val in measure_vals.items():
            ret[brain_name] = (self.brains_to_curriculums[brain_name]
                               .increment_lesson(measure_val))
    return ret
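A hedged sketch of the training-loop side: collect each brain's progress measure and reward-buffer length, then let the meta curriculum decide which lessons advance. `trainers`, `get_last_reward`, and `reward_buffer` are illustrative names, not confirmed API.

measure_vals = {name: t.get_last_reward for name, t in trainers.items()}
reward_buff_sizes = {name: len(t.reward_buffer) for name, t in trainers.items()}
incremented = meta_curriculum.increment_lessons(
    measure_vals, reward_buff_sizes=reward_buff_sizes)
for name, did_increment in incremented.items():
    if did_increment:
        print('{0} advanced to lesson {1}'.format(
            name, meta_curriculum.lesson_nums[name]))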
Sets all the curriculums in this meta curriculum to a specified lesson number. Args: lesson_num (int): The lesson number which all the curriculums will be set to.
def set_all_curriculums_to_lesson_num(self, lesson_num):
    """Sets all the curriculums in this meta curriculum to a specified
    lesson number.

    Args:
        lesson_num (int): The lesson number which all the curriculums will
            be set to.
    """
    for _, curriculum in self.brains_to_curriculums.items():
        curriculum.lesson_num = lesson_num
Get the combined configuration of all curriculums in this MetaCurriculum. Returns: A dict from parameter to value.
def get_config(self):
    """Get the combined configuration of all curriculums in this
    MetaCurriculum.

    Returns:
        A dict from parameter to value.
    """
    config = {}
    for _, curriculum in self.brains_to_curriculums.items():
        curr_config = curriculum.get_config()
        config.update(curr_config)
    return config
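Sketch: the combined config is what typically gets fed back to the environment on reset, so every curriculum's parameters take effect together (the parameter name and value below are invented for illustration).

new_config = meta_curriculum.get_config()  # e.g. {'wall_height': 2.5}
env.reset(config=new_config, train_mode=True)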
Sends a signal to reset the Unity environment. :return: AllBrainInfo: A data structure corresponding to the initial reset state of the environment.
def reset(self, config=None, train_mode=True, custom_reset_parameters=None) -> AllBrainInfo:
    """
    Sends a signal to reset the Unity environment.
    :return: AllBrainInfo: A data structure corresponding to the initial reset state of the environment.
    """
    if config is None:
        config = self._resetParameters
    elif config:
        logger.info("Academy reset with parameters: {0}"
                    .format(', '.join([str(x) + ' -> ' + str(config[x]) for x in config])))
    for k in config:
        if (k in self._resetParameters) and (isinstance(config[k], (int, float))):
            self._resetParameters[k] = config[k]
        elif not isinstance(config[k], (int, float)):
            raise UnityEnvironmentException(
                "The value for parameter '{0}' must be an Integer or a Float.".format(k))
        else:
            raise UnityEnvironmentException(
                "The parameter '{0}' is not a valid parameter.".format(k))

    if self._loaded:
        outputs = self.communicator.exchange(
            self._generate_reset_input(train_mode, config, custom_reset_parameters)
        )
        if outputs is None:
            raise KeyboardInterrupt
        rl_output = outputs.rl_output
        s = self._get_state(rl_output)
        self._global_done = s[1]
        for _b in self._external_brain_names:
            self._n_agents[_b] = len(s[0][_b].agents)
        return s[0]
    else:
        raise UnityEnvironmentException("No Unity environment is loaded.")
Provides the environment with an action, moves the environment dynamics forward accordingly, and returns observation, state, and reward information to the agent. :param value: Value estimates provided by agents. :param vector_action: Agent's vector action. Can be a scalar or vector of int/floats. :param memory: Vector corresponding to memory used for recurrent policies. :param text_action: Text action to send to the environment. :param custom_action: Optional instance of a CustomAction protobuf message. :return: AllBrainInfo: A data structure corresponding to the new state of the environment.
def step(self, vector_action=None, memory=None, text_action=None, value=None,
         custom_action=None) -> AllBrainInfo:
    """
    Provides the environment with an action, moves the environment dynamics forward accordingly,
    and returns observation, state, and reward information to the agent.
    :param value: Value estimates provided by agents.
    :param vector_action: Agent's vector action. Can be a scalar or vector of int/floats.
    :param memory: Vector corresponding to memory used for recurrent policies.
    :param text_action: Text action to send to the environment.
    :param custom_action: Optional instance of a CustomAction protobuf message.
    :return: AllBrainInfo: A data structure corresponding to the new state of the environment.
    """
    vector_action = {} if vector_action is None else vector_action
    memory = {} if memory is None else memory
    text_action = {} if text_action is None else text_action
    value = {} if value is None else value
    custom_action = {} if custom_action is None else custom_action

    # Check that environment is loaded, and episode is currently running.
    if self._loaded and not self._global_done and self._global_done is not None:
        # Wrap bare single-brain inputs into {brain_name: input} dictionaries.
        if isinstance(vector_action, self.SINGLE_BRAIN_ACTION_TYPES):
            if self._num_external_brains == 1:
                vector_action = {self._external_brain_names[0]: vector_action}
            elif self._num_external_brains > 1:
                raise UnityActionException(
                    "You have {0} brains, you need to feed a dictionary of brain names as keys, "
                    "and vector_actions as values".format(self._num_external_brains))
            else:
                raise UnityActionException(
                    "There are no external brains in the environment, "
                    "step cannot take a vector_action input")

        if isinstance(memory, self.SINGLE_BRAIN_ACTION_TYPES):
            if self._num_external_brains == 1:
                memory = {self._external_brain_names[0]: memory}
            elif self._num_external_brains > 1:
                raise UnityActionException(
                    "You have {0} brains, you need to feed a dictionary of brain names as keys "
                    "and memories as values".format(self._num_external_brains))
            else:
                raise UnityActionException(
                    "There are no external brains in the environment, "
                    "step cannot take a memory input")

        if isinstance(text_action, self.SINGLE_BRAIN_TEXT_TYPES):
            if self._num_external_brains == 1:
                text_action = {self._external_brain_names[0]: text_action}
            elif self._num_external_brains > 1:
                raise UnityActionException(
                    "You have {0} brains, you need to feed a dictionary of brain names as keys "
                    "and text_actions as values".format(self._num_external_brains))
            else:
                raise UnityActionException(
                    "There are no external brains in the environment, "
                    "step cannot take a text_action input")

        if isinstance(value, self.SINGLE_BRAIN_ACTION_TYPES):
            if self._num_external_brains == 1:
                value = {self._external_brain_names[0]: value}
            elif self._num_external_brains > 1:
                raise UnityActionException(
                    "You have {0} brains, you need to feed a dictionary of brain names as keys "
                    "and state/action value estimates as values".format(self._num_external_brains))
            else:
                raise UnityActionException(
                    "There are no external brains in the environment, "
                    "step cannot take a value input")

        if isinstance(custom_action, CustomAction):
            if self._num_external_brains == 1:
                custom_action = {self._external_brain_names[0]: custom_action}
            elif self._num_external_brains > 1:
                raise UnityActionException(
                    "You have {0} brains, you need to feed a dictionary of brain names as keys "
                    "and CustomAction instances as values".format(self._num_external_brains))
            else:
                raise UnityActionException(
                    "There are no external brains in the environment, "
                    "step cannot take a custom_action input")

        for brain_name in (list(vector_action.keys()) + list(memory.keys())
                           + list(text_action.keys())):
            if brain_name not in self._external_brain_names:
                raise UnityActionException(
                    "The name {0} does not correspond to an external brain "
                    "in the environment".format(brain_name))

        for brain_name in self._external_brain_names:
            n_agent = self._n_agents[brain_name]
            if brain_name not in vector_action:
                # Default to zero actions of the expected size.
                if self._brains[brain_name].vector_action_space_type == "discrete":
                    vector_action[brain_name] = [0.0] * n_agent * len(
                        self._brains[brain_name].vector_action_space_size)
                else:
                    vector_action[brain_name] = [0.0] * n_agent * \
                        self._brains[brain_name].vector_action_space_size[0]
            else:
                vector_action[brain_name] = self._flatten(vector_action[brain_name])
            if brain_name not in memory:
                memory[brain_name] = []
            else:
                if memory[brain_name] is None:
                    memory[brain_name] = []
                else:
                    memory[brain_name] = self._flatten(memory[brain_name])
            if brain_name not in text_action:
                text_action[brain_name] = [""] * n_agent
            else:
                if text_action[brain_name] is None:
                    text_action[brain_name] = [""] * n_agent
                if isinstance(text_action[brain_name], str):
                    text_action[brain_name] = [text_action[brain_name]] * n_agent
            if brain_name not in custom_action:
                custom_action[brain_name] = [None] * n_agent
            else:
                if custom_action[brain_name] is None:
                    custom_action[brain_name] = [None] * n_agent
                if isinstance(custom_action[brain_name], CustomAction):
                    custom_action[brain_name] = [custom_action[brain_name]] * n_agent

            number_text_actions = len(text_action[brain_name])
            if not ((number_text_actions == n_agent) or number_text_actions == 0):
                raise UnityActionException(
                    "There was a mismatch between the provided text_action and "
                    "the environment's expectation: "
                    "The brain {0} expected {1} text_action but was given {2}".format(
                        brain_name, n_agent, number_text_actions))

            discrete_check = self._brains[brain_name].vector_action_space_type == "discrete"
            expected_discrete_size = n_agent * len(
                self._brains[brain_name].vector_action_space_size)
            continuous_check = self._brains[brain_name].vector_action_space_type == "continuous"
            expected_continuous_size = \
                self._brains[brain_name].vector_action_space_size[0] * n_agent
            if not ((discrete_check and
                     len(vector_action[brain_name]) == expected_discrete_size) or
                    (continuous_check and
                     len(vector_action[brain_name]) == expected_continuous_size)):
                raise UnityActionException(
                    "There was a mismatch between the provided action and "
                    "the environment's expectation: "
                    "The brain {0} expected {1} {2} action(s), but was provided: {3}"
                    .format(brain_name,
                            str(expected_discrete_size) if discrete_check
                            else str(expected_continuous_size),
                            self._brains[brain_name].vector_action_space_type,
                            str(vector_action[brain_name])))

        outputs = self.communicator.exchange(
            self._generate_step_input(vector_action, memory, text_action, value, custom_action))
        if outputs is None:
            raise KeyboardInterrupt
        rl_output = outputs.rl_output
        state = self._get_state(rl_output)
        self._global_done = state[1]
        for _b in self._external_brain_names:
            self._n_agents[_b] = len(state[0][_b].agents)
        return state[0]
    elif not self._loaded:
        raise UnityEnvironmentException("No Unity environment is loaded.")
    elif self._global_done:
        raise UnityActionException(
            "The episode is completed. Reset the environment with 'reset()'")
    elif self.global_done is None:
        raise UnityActionException(
            "You cannot conduct step without first calling reset. "
            "Reset the environment with 'reset()'")
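Putting reset and step together, a minimal single-brain interaction loop. This is a sketch assuming one external brain with a continuous action space; random actions stand in for a real policy.

import numpy as np

all_brain_info = env.reset(train_mode=True)
brain_name = env.external_brain_names[0]
action_size = env.brains[brain_name].vector_action_space_size[0]
while not env.global_done:
    n_agents = len(all_brain_info[brain_name].agents)
    # With a single brain, a bare array is accepted and wrapped into
    # {brain_name: action} by step(), as shown above.
    action = np.random.randn(n_agents, action_size)
    all_brain_info = env.step(vector_action=action)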
Converts arrays to a list. :param arr: numpy vector. :return: flattened list.
@classmethod
def _flatten(cls, arr) -> List[float]:
    """
    Converts arrays to a list.
    :param arr: numpy vector.
    :return: flattened list.
    """
    if isinstance(arr, cls.SCALAR_ACTION_TYPES):
        arr = [float(arr)]
    if isinstance(arr, np.ndarray):
        arr = arr.tolist()
    if len(arr) == 0:
        return arr
    if isinstance(arr[0], np.ndarray):
        arr = [item for sublist in arr for item in sublist.tolist()]
    if isinstance(arr[0], list):
        arr = [item for sublist in arr for item in sublist]
    arr = [float(x) for x in arr]
    return arr
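Expected behavior, doctest-style (assuming the enclosing class is UnityEnvironment):

>>> UnityEnvironment._flatten(3)
[3.0]
>>> UnityEnvironment._flatten(np.array([[1, 2], [3, 4]]))
[1.0, 2.0, 3.0, 4.0]
>>> UnityEnvironment._flatten([[1, 2], [3, 4]])
[1.0, 2.0, 3.0, 4.0]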
Collects experience information from all external brains in the environment at the current step. :return: a dictionary of BrainInfo objects.
def _get_state(self, output: UnityRLOutput) -> Tuple[AllBrainInfo, bool]:
    """
    Collects experience information from all external brains in the environment at the current step.
    :return: a dictionary of BrainInfo objects.
    """
    _data = {}
    global_done = output.global_done
    for brain_name in output.agentInfos:
        agent_info_list = output.agentInfos[brain_name].value
        _data[brain_name] = BrainInfo.from_agent_proto(agent_info_list,
                                                       self.brains[brain_name])
    return _data, global_done