Each row in this dump has the following fields (length and cardinality statistics as reported by the source viewer):

| field | type | stats |
|---|---|---|
| repo | string | lengths 7-55 |
| path | string | lengths 4-127 |
| func_name | string | lengths 1-88 |
| original_string | string | lengths 75-19.8k |
| language | string | 1 distinct value |
| code | string | lengths 75-19.8k |
| code_tokens | sequence | |
| docstring | string | lengths 3-17.3k |
| docstring_tokens | sequence | |
| sha | string | lengths 40-40 |
| url | string | lengths 87-242 |
| partition | string | 1 distinct value |
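The rows below follow this schema. As a rough, illustrative sketch (values abbreviated, not copied from any single row), a record can be treated as a plain Python dict, with the token fields giving a ready-made input/target pair for code-to-docstring models:

```python
# Illustrative record shaped like the schema above; values are abbreviated
# stand-ins, not copied from a real row.
record = {
    "repo": "persephone-tools/persephone",
    "path": "persephone/corpus.py",
    "func_name": "Corpus.initialize_labels",
    "original_string": "def initialize_labels(self, labels): ...",
    "language": "python",
    "code": "def initialize_labels(self, labels): ...",  # mirrors original_string
    "code_tokens": ["def", "initialize_labels", "(", "self", ",", "labels", ")", ":"],
    "docstring": "Create mappings from label to index and index to label",
    "docstring_tokens": ["Create", "mappings", "from", "label", "to", "index"],
    "sha": "f94c63e4d5fe719fb1deba449b177bb299d225fb",
    "url": "https://github.com/persephone-tools/persephone/blob/.../corpus.py",
    "partition": "train",
}

# e.g. a code-to-docstring training pair:
source = " ".join(record["code_tokens"])
target = " ".join(record["docstring_tokens"])
```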
- **repo:** persephone-tools/persephone
- **path:** persephone/corpus.py
- **func_name:** Corpus.initialize_labels
- **docstring:** Create mappings from label to index and index to label
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L357-L366
- **partition:** train

```python
def initialize_labels(self, labels: Set[str]) -> Tuple[dict, dict]:
    """Create mappings from label to index and index to label"""
    logger.debug("Creating mappings for labels")
    label_to_index = {label: index for index, label in enumerate(
        ["pad"] + sorted(list(labels)))}
    index_to_label = {index: phn for index, phn in enumerate(
        ["pad"] + sorted(list(labels)))}
    return label_to_index, index_to_label
```
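A detail of `initialize_labels` worth making explicit: the reserved "pad" symbol is prepended before the sorted label set, so it always receives index 0. A minimal standalone sketch of the same mapping (illustrative, not part of the dataset):

```python
# The "pad" symbol is prepended before sorting, so it always maps to 0.
labels = {"b", "a", "c"}
vocab = ["pad"] + sorted(labels)              # ['pad', 'a', 'b', 'c']
label_to_index = {l: i for i, l in enumerate(vocab)}
index_to_label = {i: l for i, l in enumerate(vocab)}
assert label_to_index["pad"] == 0
assert index_to_label[2] == "b"
```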
- **repo:** persephone-tools/persephone
- **path:** persephone/corpus.py
- **func_name:** Corpus.prepare_feats
- **docstring:** Prepares input features
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L368-L392
- **partition:** train

```python
def prepare_feats(self) -> None:
    """ Prepares input features"""
    logger.debug("Preparing input features")
    self.feat_dir.mkdir(parents=True, exist_ok=True)

    should_extract_feats = False
    for path in self.wav_dir.iterdir():
        if not path.suffix == ".wav":
            logger.info("Non wav file found in wav directory: %s", path)
            continue
        prefix = os.path.basename(os.path.splitext(str(path))[0])
        mono16k_wav_path = self.feat_dir / "{}.wav".format(prefix)
        feat_path = self.feat_dir / "{}.{}.npy".format(prefix, self.feat_type)
        if not feat_path.is_file():
            # Then we should extract feats
            should_extract_feats = True
            if not mono16k_wav_path.is_file():
                feat_extract.convert_wav(path, mono16k_wav_path)

    # TODO Should be extracting feats on a per-file basis. Right now we
    # check if any feats files don't exist and then do all the feature
    # extraction.
    if should_extract_feats:
        feat_extract.from_dir(self.feat_dir, self.feat_type)
```
- **repo:** persephone-tools/persephone
- **path:** persephone/corpus.py
- **func_name:** Corpus.make_data_splits
- **docstring:** Splits the utterances into training, validation and test sets.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L394-L438
- **partition:** train

```python
def make_data_splits(self, max_samples: int) -> None:
    """ Splits the utterances into training, validation and test sets."""
    train_f_exists = self.train_prefix_fn.is_file()
    valid_f_exists = self.valid_prefix_fn.is_file()
    test_f_exists = self.test_prefix_fn.is_file()

    if train_f_exists and valid_f_exists and test_f_exists:
        logger.debug("Split for training, validation and tests specified by files")
        self.train_prefixes = self.read_prefixes(self.train_prefix_fn)
        self.valid_prefixes = self.read_prefixes(self.valid_prefix_fn)
        self.test_prefixes = self.read_prefixes(self.test_prefix_fn)
        return

    # Otherwise we now need to load prefixes for other cases addressed
    # below
    prefixes = self.determine_prefixes()
    prefixes = utils.filter_by_size(
        self.feat_dir, prefixes, self.feat_type, max_samples)

    if not train_f_exists and not valid_f_exists and not test_f_exists:
        logger.debug("No files supplied to define the split for training, validation"
                     " and tests. Using default.")
        train_prefixes, valid_prefixes, test_prefixes = self.divide_prefixes(prefixes)
        self.train_prefixes = train_prefixes
        self.valid_prefixes = valid_prefixes
        self.test_prefixes = test_prefixes
        self.write_prefixes(train_prefixes, self.train_prefix_fn)
        self.write_prefixes(valid_prefixes, self.valid_prefix_fn)
        self.write_prefixes(test_prefixes, self.test_prefix_fn)
    elif not train_f_exists and valid_f_exists and test_f_exists:
        # Then we just make all other prefixes training prefixes.
        self.valid_prefixes = self.read_prefixes(self.valid_prefix_fn)
        self.test_prefixes = self.read_prefixes(self.test_prefix_fn)
        train_prefixes = list(
            set(prefixes) - set(self.valid_prefixes))
        self.train_prefixes = list(
            set(train_prefixes) - set(self.test_prefixes))
        self.write_prefixes(self.train_prefixes, self.train_prefix_fn)
    else:
        raise NotImplementedError(
            "The following case has not been implemented:" +
            "{} exists - {}\n".format(self.train_prefix_fn, train_f_exists) +
            "{} exists - {}\n".format(self.valid_prefix_fn, valid_f_exists) +
            "{} exists - {}\n".format(self.test_prefix_fn, test_f_exists))
```
- **repo:** persephone-tools/persephone
- **path:** persephone/corpus.py
- **func_name:** Corpus.divide_prefixes
- **docstring:** Divide data into training, validation and test subsets
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L464-L495
- **partition:** train

```python
def divide_prefixes(prefixes: List[str], seed: int = 0) -> Tuple[List[str], List[str], List[str]]:
    """Divide data into training, validation and test subsets"""
    if len(prefixes) < 3:
        raise PersephoneException(
            "{} cannot be split into 3 groups as it only has {} items".format(prefixes, len(prefixes))
        )
    Ratios = namedtuple("Ratios", ["train", "valid", "test"])
    ratios = Ratios(.90, .05, .05)
    train_end = int(ratios.train * len(prefixes))
    valid_end = int(train_end + ratios.valid * len(prefixes))

    # We must make sure that at least one element exists in test
    if valid_end == len(prefixes):
        valid_end -= 1
    # If train_end and valid_end are the same we end up with no valid_prefixes
    # so we must ensure at least one prefix is placed in this category
    if train_end == valid_end:
        train_end -= 1

    random.seed(seed)
    random.shuffle(prefixes)

    train_prefixes = prefixes[:train_end]
    valid_prefixes = prefixes[train_end:valid_end]
    test_prefixes = prefixes[valid_end:]

    assert train_prefixes, "Got empty set for training data"
    assert valid_prefixes, "Got empty set for validation data"
    assert test_prefixes, "Got empty set for testing data"
    return train_prefixes, valid_prefixes, test_prefixes
```
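The boundary arithmetic in `divide_prefixes` is easy to misread, so here it is worked through for two corpus sizes (pure illustration):

```python
# Worked boundary arithmetic for the .90/.05/.05 ratios above.
n = 20
train_end = int(.90 * n)               # 18
valid_end = int(train_end + .05 * n)   # 19
assert (train_end, valid_end) == (18, 19)
# train = prefixes[:18], valid = prefixes[18:19], test = prefixes[19:]

n = 10
train_end = int(.90 * n)               # 9
valid_end = int(train_end + .05 * n)   # int(9.5) = 9: the valid slice collapses,
assert train_end == valid_end          # which is why the function decrements
                                       # train_end when the two ends coincide.
```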
- **repo:** persephone-tools/persephone
- **path:** persephone/corpus.py
- **func_name:** Corpus.indices_to_labels
- **docstring:** Converts a sequence of indices into their corresponding labels.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L497-L500
- **partition:** train

```python
def indices_to_labels(self, indices: Sequence[int]) -> List[str]:
    """ Converts a sequence of indices into their corresponding labels."""
    return [(self.INDEX_TO_LABEL[index]) for index in indices]
```
- **repo:** persephone-tools/persephone
- **path:** persephone/corpus.py
- **func_name:** Corpus.labels_to_indices
- **docstring:** Converts a sequence of labels into their corresponding indices.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L502-L505
- **partition:** train

```python
def labels_to_indices(self, labels: Sequence[str]) -> List[int]:
    """ Converts a sequence of labels into their corresponding indices."""
    return [self.LABEL_TO_INDEX[label] for label in labels]
```
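`indices_to_labels` and `labels_to_indices` are inverses of each other given the mappings built by `initialize_labels`. A self-contained round-trip sketch (the vocabulary here is hypothetical):

```python
# Round trip between the two conversion methods above, using the
# "pad" = 0 convention from initialize_labels.
vocab = ["pad", "a", "b", "c"]
LABEL_TO_INDEX = {l: i for i, l in enumerate(vocab)}
INDEX_TO_LABEL = {i: l for i, l in enumerate(vocab)}

labels = ["a", "c", "a"]
indices = [LABEL_TO_INDEX[l] for l in labels]        # [1, 3, 1]
assert [INDEX_TO_LABEL[i] for i in indices] == labels
```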
- **repo:** persephone-tools/persephone
- **path:** persephone/corpus.py
- **func_name:** Corpus.num_feats
- **docstring:** The number of features per time step in the corpus.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L508-L523
- **partition:** train

```python
def num_feats(self):
    """ The number of features per time step in the corpus. """
    if not self._num_feats:
        filename = self.get_train_fns()[0][0]
        feats = np.load(filename)  # pylint: disable=maybe-no-member
        if len(feats.shape) == 3:
            # Then there are multiple channels of multiple feats
            self._num_feats = feats.shape[1] * feats.shape[2]
        elif len(feats.shape) == 2:
            # Otherwise it is just of shape time x feats
            self._num_feats = feats.shape[1]
        else:
            raise ValueError(
                "Feature matrix of shape %s unexpected" % str(feats.shape))
    return self._num_feats
```
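The branching in `num_feats` flattens a multi-channel `(time, channels, feats)` array to `channels * feats` features per step, while a `(time, feats)` array is used as-is. A small numpy check of that shape logic (array sizes are made up):

```python
import numpy as np

# Shape logic from num_feats: 3-D arrays are flattened per time step.
multichannel = np.zeros((100, 3, 41))   # (time, channels, feats)
flat = np.zeros((100, 41))              # (time, feats)
assert multichannel.shape[1] * multichannel.shape[2] == 123
assert flat.shape[1] == 41
```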
- **repo:** persephone-tools/persephone
- **path:** persephone/corpus.py
- **func_name:** Corpus.prefixes_to_fns
- **docstring:** Fetches the file paths to the features files and labels files corresponding to the provided list of features
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L525-L533
- **partition:** train

```python
def prefixes_to_fns(self, prefixes: List[str]) -> Tuple[List[str], List[str]]:
    """ Fetches the file paths to the features files and labels files
    corresponding to the provided list of features"""
    # TODO Return pathlib.Paths
    feat_fns = [str(self.feat_dir / ("%s.%s.npy" % (prefix, self.feat_type)))
                for prefix in prefixes]
    label_fns = [str(self.label_dir / ("%s.%s" % (prefix, self.label_type)))
                 for prefix in prefixes]
    return feat_fns, label_fns
```
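`prefixes_to_fns` encodes the on-disk naming convention, `<prefix>.<feat_type>.npy` for feature files and `<prefix>.<label_type>` for label files, which `prepare_feats` also relies on. A toy illustration (directory and type names are hypothetical):

```python
from pathlib import Path

# The filename convention assumed above (names are invented for illustration).
feat_dir, label_dir = Path("feat"), Path("label")
prefix, feat_type, label_type = "utt1", "fbank", "phonemes"
print(feat_dir / ("%s.%s.npy" % (prefix, feat_type)))   # feat/utt1.fbank.npy
print(label_dir / ("%s.%s" % (prefix, label_type)))     # label/utt1.phonemes
```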
- **repo:** persephone-tools/persephone
- **path:** persephone/corpus.py
- **func_name:** Corpus.get_train_fns
- **docstring:** Fetches the training set of the corpus. Outputs a Tuple of size 2, where the first element is a list of paths to input features files, one per utterance. The second element is a list of paths to the transcriptions.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L535-L542
- **partition:** train

```python
def get_train_fns(self) -> Tuple[List[str], List[str]]:
    """ Fetches the training set of the corpus.

    Outputs a Tuple of size 2, where the first element is a list of paths
    to input features files, one per utterance. The second element is a
    list of paths to the transcriptions.
    """
    return self.prefixes_to_fns(self.train_prefixes)
```
- **repo:** persephone-tools/persephone
- **path:** persephone/corpus.py
- **func_name:** Corpus.get_valid_fns
- **docstring:** Fetches the validation set of the corpus.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L544-L546
- **partition:** train

```python
def get_valid_fns(self) -> Tuple[List[str], List[str]]:
    """ Fetches the validation set of the corpus."""
    return self.prefixes_to_fns(self.valid_prefixes)
```
- **repo:** persephone-tools/persephone
- **path:** persephone/corpus.py
- **func_name:** Corpus.review
- **docstring:** Used to play the WAV files and compare with the transcription.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L589-L599
- **partition:** train

```python
def review(self) -> None:
    """ Used to play the WAV files and compare with the transcription. """
    for prefix in self.determine_prefixes():
        print("Utterance: {}".format(prefix))
        wav_fn = self.feat_dir / "{}.wav".format(prefix)
        label_fn = self.label_dir / "{}.{}".format(prefix, self.label_type)
        with label_fn.open() as f:
            transcript = f.read().strip()
        print("Transcription: {}".format(transcript))
        subprocess.run(["play", str(wav_fn)])
```
- **repo:** persephone-tools/persephone
- **path:** persephone/corpus.py
- **func_name:** Corpus.pickle
- **docstring:** Pickles the Corpus object in a file in tgt_dir.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L601-L607
- **partition:** train

```python
def pickle(self) -> None:
    """ Pickles the Corpus object in a file in tgt_dir. """
    pickle_path = self.tgt_dir / "corpus.p"
    logger.debug("pickling %r object and saving it to path %s", self, pickle_path)
    with pickle_path.open("wb") as f:
        pickle.dump(self, f)
```
- **repo:** persephone-tools/persephone
- **path:** persephone/utils.py
- **func_name:** zero_pad
- **docstring:** Zero pads along the 0th dimension to make sure the utterance array x is of length to_length.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utils.py#L58-L69
- **partition:** train

```python
def zero_pad(matrix, to_length):
    """ Zero pads along the 0th dimension to make sure the utterance array
    x is of length to_length."""
    assert matrix.shape[0] <= to_length
    if not matrix.shape[0] <= to_length:
        logger.error("zero_pad cannot be performed on matrix with shape {}"
                     " to length {}".format(matrix.shape[0], to_length))
        raise ValueError
    result = np.zeros((to_length,) + matrix.shape[1:])
    result[:matrix.shape[0]] = matrix
    return result
```
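Note that the leading `assert` in `zero_pad` fires before the guarded logging-and-raise branch can run, so that branch is unreachable as written. The padding itself is simple; a minimal numpy sketch of what it produces (shapes chosen for illustration):

```python
import numpy as np

matrix = np.ones((4, 2))                    # a 4-frame, 2-feature utterance
result = np.zeros((6,) + matrix.shape[1:])  # pad target length 6
result[:matrix.shape[0]] = matrix
assert result.shape == (6, 2)
assert result[4:].sum() == 0                # the padded frames stay zero
```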
- **repo:** persephone-tools/persephone
- **path:** persephone/utils.py
- **func_name:** load_batch_x
- **docstring:** Loads a batch of input features given a list of paths to numpy arrays in that batch.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utils.py#L88-L104
- **partition:** train

```python
def load_batch_x(path_batch, flatten=False, time_major=False):
    """ Loads a batch of input features given a list of paths to numpy
    arrays in that batch."""
    utterances = [np.load(str(path)) for path in path_batch]
    utter_lens = [utterance.shape[0] for utterance in utterances]
    max_len = max(utter_lens)
    batch_size = len(path_batch)
    shape = (batch_size, max_len) + tuple(utterances[0].shape[1:])
    batch = np.zeros(shape)
    for i, utt in enumerate(utterances):
        batch[i] = zero_pad(utt, max_len)
    if flatten:
        batch = collapse(batch, time_major=time_major)
    return batch, np.array(utter_lens)
```
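The effect of `load_batch_x`, minus the file I/O: utterances of uneven length are zero-padded into a single `(batch, max_len, feats)` array. A self-contained sketch with made-up shapes:

```python
import numpy as np

# Three utterances of uneven length, 8 features per frame.
utterances = [np.ones((n, 8)) for n in (3, 5, 2)]
max_len = max(u.shape[0] for u in utterances)
batch = np.zeros((len(utterances), max_len) + utterances[0].shape[1:])
for i, utt in enumerate(utterances):
    batch[i, :utt.shape[0]] = utt           # zero-pad along the time axis
assert batch.shape == (3, 5, 8)
```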
- **repo:** persephone-tools/persephone
- **path:** persephone/utils.py
- **func_name:** batch_per
- **docstring:** Calculates the phoneme error rate of a batch.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utils.py#L106-L115
- **partition:** train

```python
def batch_per(hyps: Sequence[Sequence[T]], refs: Sequence[Sequence[T]]) -> float:
    """ Calculates the phoneme error rate of a batch."""
    macro_per = 0.0
    for i in range(len(hyps)):
        ref = [phn_i for phn_i in refs[i] if phn_i != 0]
        hyp = [phn_i for phn_i in hyps[i] if phn_i != 0]
        macro_per += distance.edit_distance(ref, hyp) / len(ref)
    return macro_per / len(hyps)
```
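`batch_per` strips index 0 (the pad label) from both sequences, then averages per-utterance edit distance normalised by reference length. `distance.edit_distance` is an external dependency; the sketch below swaps in a plain Levenshtein implementation so it runs standalone (inputs are invented):

```python
def edit_distance(ref, hyp):
    """Plain Levenshtein distance, a stand-in for distance.edit_distance."""
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            d[i][j] = min(d[i - 1][j] + 1,      # deletion
                          d[i][j - 1] + 1,      # insertion
                          d[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1]))
    return d[len(ref)][len(hyp)]

# Index 0 is the pad label, so it is stripped before scoring, as above.
refs = [[1, 2, 3, 0, 0], [4, 5, 0]]
hyps = [[1, 2, 4, 0, 0], [4, 5, 0]]
macro_per = 0.0
for ref, hyp in zip(refs, hyps):
    ref = [p for p in ref if p != 0]
    hyp = [p for p in hyp if p != 0]
    macro_per += edit_distance(ref, hyp) / len(ref)
assert abs(macro_per / len(hyps) - (1 / 3) / 2) < 1e-9
```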
- **repo:** persephone-tools/persephone
- **path:** persephone/utils.py
- **func_name:** filter_by_size
- **docstring:** Sorts the files by their length and returns those with less than or equal to max_samples length. Returns the filename prefixes of those files. The main job of the method is to filter, but the sorting may give better efficiency when doing dynamic batching unless it gets shuffled downstream.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utils.py#L141-L154
- **partition:** train

```python
def filter_by_size(feat_dir: Path, prefixes: List[str], feat_type: str,
                   max_samples: int) -> List[str]:
    """ Sorts the files by their length and returns those with less
    than or equal to max_samples length. Returns the filename prefixes of
    those files. The main job of the method is to filter, but the sorting
    may give better efficiency when doing dynamic batching unless it gets
    shuffled downstream.
    """
    # TODO Tell the user what utterances we are removing.
    prefix_lens = get_prefix_lens(Path(feat_dir), prefixes, feat_type)
    prefixes = [prefix for prefix, length in prefix_lens
                if length <= max_samples]
    return prefixes
```
- **repo:** persephone-tools/persephone
- **path:** persephone/utils.py
- **func_name:** wav_length
- **docstring:** Returns the length of the WAV file in seconds.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utils.py#L170-L179
- **partition:** train

```python
def wav_length(fn: str) -> float:
    """ Returns the length of the WAV file in seconds."""
    args = [config.SOX_PATH, fn, "-n", "stat"]
    p = subprocess.Popen(
        args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    length_line = str(p.communicate()[1]).split("\\n")[1].split()
    print(length_line)
    assert length_line[0] == "Length"
    return float(length_line[-1])
```
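`wav_length` shells out to `sox ... -n stat` and scrapes the stderr text via the `str()` of a bytes object, which is fragile. A common alternative, sketched here under the assumption that SoX's `soxi` utility is on PATH, is `soxi -D`, which prints the duration in seconds on stdout. This helper is hypothetical, not part of persephone:

```python
import subprocess

def wav_length_soxi(fn: str) -> float:
    # Hypothetical alternative to the stderr-parsing above: soxi -D prints
    # the duration in seconds on stdout. Assumes SoX's soxi is installed.
    out = subprocess.run(["soxi", "-D", fn], capture_output=True,
                         text=True, check=True).stdout
    return float(out.strip())
```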
- **repo:** persephone-tools/persephone
- **path:** persephone/datasets/bkw.py
- **func_name:** pull_en_words
- **docstring:** Fetches a repository containing English words.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/bkw.py#L27-L34
- **partition:** train

```python
def pull_en_words() -> None:
    """ Fetches a repository containing English words. """
    ENGLISH_WORDS_URL = "https://github.com/dwyl/english-words.git"
    en_words_path = Path(config.EN_WORDS_PATH)
    if not en_words_path.is_file():
        subprocess.run(["git", "clone",
                        ENGLISH_WORDS_URL, str(en_words_path.parent)])
```
- **repo:** persephone-tools/persephone
- **path:** persephone/datasets/bkw.py
- **func_name:** get_en_words
- **docstring:** Returns a list of English words which can be used to filter out code-switched sentences.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/bkw.py#L36-L69
- **partition:** train

```python
def get_en_words() -> Set[str]:
    """
    Returns a list of English words which can be used to filter out
    code-switched sentences.
    """
    pull_en_words()
    with open(config.EN_WORDS_PATH) as words_f:
        raw_words = words_f.readlines()
    en_words = set([word.strip().lower() for word in raw_words])
    NA_WORDS_IN_EN_DICT = set(["kore", "nani", "karri", "imi", "o", "yaw",
                               "i", "bi", "aye", "imi", "ane", "kubba",
                               "kab", "a-", "ad", "a", "mak", "selim",
                               "ngai", "en", "yo", "wud", "mani", "yak",
                               "manu", "ka-", "mong", "manga", "ka-",
                               "mane", "kala", "name", "kayo", "kare",
                               "laik", "bale", "ni", "rey", "bu", "re",
                               "iman", "bom", "wam", "alu", "nan", "kure",
                               "kuri", "wam", "ka", "ng", "yi", "na", "m",
                               "arri", "e", "kele", "arri", "nga", "kakan",
                               "ai", "ning", "mala", "ti", "wolk", "bo",
                               "andi", "ken", "ba", "aa", "kun", "bini",
                               "wo", "bim", "man", "bord", "al", "mah",
                               "won", "ku", "ay", "belen", "wen", "yah",
                               "muni", "bah", "di", "mm", "anu", "nane",
                               "ma", "kum", "birri", "ray", "h", "kane",
                               "mumu", "bi", "ah", "i-", "n", "mi",
                               "bedman", "rud", "le", "babu", "da",
                               "kakkak", "yun", "ande", "naw", "kam",
                               "bolk", "woy", "u", "bi-", ])
    EN_WORDS_NOT_IN_EN_DICT = set(["screenprinting"])
    en_words = en_words.difference(NA_WORDS_IN_EN_DICT)
    en_words = en_words | EN_WORDS_NOT_IN_EN_DICT
    return en_words
```
- **repo:** persephone-tools/persephone
- **path:** persephone/datasets/bkw.py
- **func_name:** explore_elan_files
- **docstring:** A function to explore the tiers of ELAN files.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/bkw.py#L73-L90
- **partition:** train

```python
def explore_elan_files(elan_paths):
    """
    A function to explore the tiers of ELAN files.
    """
    for elan_path in elan_paths:
        print(elan_path)
        eafob = Eaf(elan_path)
        tier_names = eafob.get_tier_names()
        for tier in tier_names:
            print("\t", tier)
            try:
                for annotation in eafob.get_annotation_data_for_tier(tier):
                    print("\t\t", annotation)
            except KeyError:
                continue
        input()
```
- **repo:** persephone-tools/persephone
- **path:** persephone/preprocess/elan.py
- **func_name:** sort_annotations
- **docstring:** Sorts the annotations by their start_time.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/elan.py#L62-L65
- **partition:** train

```python
def sort_annotations(annotations: List[Tuple[int, int, str]]
                     ) -> List[Tuple[int, int, str]]:
    """ Sorts the annotations by their start_time. """
    return sorted(annotations, key=lambda x: x[0])
```
- **repo:** persephone-tools/persephone
- **path:** persephone/preprocess/elan.py
- **func_name:** utterances_from_tier
- **docstring:** Returns utterances found in the given Eaf object in the given tier.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/elan.py#L68-L91
- **partition:** train

```python
def utterances_from_tier(eafob: Eaf, tier_name: str) -> List[Utterance]:
    """ Returns utterances found in the given Eaf object in the given tier."""
    try:
        speaker = eafob.tiers[tier_name][2]["PARTICIPANT"]
    except KeyError:
        speaker = None  # We don't know the name of the speaker.

    tier_utterances = []
    annotations = sort_annotations(
        list(eafob.get_annotation_data_for_tier(tier_name)))
    for i, annotation in enumerate(annotations):
        eaf_stem = eafob.eaf_path.stem
        utter_id = "{}.{}.{}".format(eaf_stem, tier_name, i)
        start_time = eafob.time_origin + annotation[0]
        end_time = eafob.time_origin + annotation[1]
        text = annotation[2]
        utterance = Utterance(eafob.media_path, eafob.eaf_path, utter_id,
                              start_time, end_time, text, speaker)
        tier_utterances.append(utterance)

    return tier_utterances
```
- **repo:** persephone-tools/persephone
- **path:** persephone/preprocess/elan.py
- **func_name:** utterances_from_eaf
- **docstring:** Extracts utterances in tiers that start with tier_prefixes found in the ELAN .eaf XML file at eaf_path. For example, if xv@Mark is a tier in the eaf file, and tier_prefixes = ["xv"], then utterances from that tier will be gathered.
- **sha:** f94c63e4d5fe719fb1deba449b177bb299d225fb
- **url:** https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/elan.py#L94-L113
- **partition:** train

```python
def utterances_from_eaf(eaf_path: Path, tier_prefixes: Tuple[str, ...]) -> List[Utterance]:
    """
    Extracts utterances in tiers that start with tier_prefixes found in the
    ELAN .eaf XML file at eaf_path.

    For example, if xv@Mark is a tier in the eaf file, and
    tier_prefixes = ["xv"], then utterances from that tier will be gathered.
    """
    if not eaf_path.is_file():
        raise FileNotFoundError("Cannot find {}".format(eaf_path))

    eaf = Eaf(eaf_path)
    utterances = []
    for tier_name in sorted(list(eaf.tiers)):  # Sorting for determinism
        for tier_prefix in tier_prefixes:
            if tier_name.startswith(tier_prefix):
                utterances.extend(utterances_from_tier(eaf, tier_name))
                break
    return utterances
```
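The tier-selection rule in `utterances_from_eaf`: a tier is included when its name starts with any of the prefixes, and the `break` ensures a tier matching several prefixes is only gathered once. A standalone sketch of just that loop (tier names are invented):

```python
# Prefix matching as done above, with the break preventing double counting.
tiers = ["xv@Mark", "xv@Jane", "rf@Mark", "notes"]
tier_prefixes = ("xv", "xv@Mark")
selected = []
for tier_name in sorted(tiers):
    for prefix in tier_prefixes:
        if tier_name.startswith(prefix):
            selected.append(tier_name)
            break
assert selected == ["xv@Jane", "xv@Mark"]
```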
persephone-tools/persephone
persephone/preprocess/elan.py
utterances_from_dir
def utterances_from_dir(eaf_dir: Path, tier_prefixes: Tuple[str, ...]) -> List[Utterance]: """ Returns the utterances found in ELAN files in a directory. Recursively explores the directory, gathering ELAN files and extracting utterances from them for tiers that start with the specified prefixes. Args: eaf_dir: A path to the directory to be searched tier_prefixes: Stings matching the start of ELAN tier names that are to be extracted. For example, if you want to extract from tiers "xv-Jane" and "xv-Mark", then tier_prefixes = ["xv"] would do the job. Returns: A list of Utterance objects. """ logger.info( "EAF from directory: {}, searching with tier_prefixes {}".format( eaf_dir, tier_prefixes)) utterances = [] for eaf_path in eaf_dir.glob("**/*.eaf"): eaf_utterances = utterances_from_eaf(eaf_path, tier_prefixes) utterances.extend(eaf_utterances) return utterances
python
def utterances_from_dir(eaf_dir: Path, tier_prefixes: Tuple[str, ...]) -> List[Utterance]:
    """ Returns the utterances found in ELAN files in a directory.

    Recursively explores the directory, gathering ELAN files and extracting
    utterances from them for tiers that start with the specified prefixes.

    Args:
        eaf_dir: A path to the directory to be searched
        tier_prefixes: Strings matching the start of ELAN tier names that are
            to be extracted. For example, if you want to extract from tiers
            "xv-Jane" and "xv-Mark", then tier_prefixes = ["xv"] would do the
            job.

    Returns:
        A list of Utterance objects.
    """

    logger.info(
        "EAF from directory: {}, searching with tier_prefixes {}".format(
            eaf_dir, tier_prefixes))

    utterances = []
    for eaf_path in eaf_dir.glob("**/*.eaf"):
        eaf_utterances = utterances_from_eaf(eaf_path, tier_prefixes)
        utterances.extend(eaf_utterances)
    return utterances
[ "def", "utterances_from_dir", "(", "eaf_dir", ":", "Path", ",", "tier_prefixes", ":", "Tuple", "[", "str", ",", "...", "]", ")", "->", "List", "[", "Utterance", "]", ":", "logger", ".", "info", "(", "\"EAF from directory: {}, searching with tier_prefixes {}\"", ".", "format", "(", "eaf_dir", ",", "tier_prefixes", ")", ")", "utterances", "=", "[", "]", "for", "eaf_path", "in", "eaf_dir", ".", "glob", "(", "\"**/*.eaf\"", ")", ":", "eaf_utterances", "=", "utterances_from_eaf", "(", "eaf_path", ",", "tier_prefixes", ")", "utterances", ".", "extend", "(", "eaf_utterances", ")", "return", "utterances" ]
Returns the utterances found in ELAN files in a directory.

Recursively explores the directory, gathering ELAN files and extracting
utterances from them for tiers that start with the specified prefixes.

Args:
    eaf_dir: A path to the directory to be searched
    tier_prefixes: Strings matching the start of ELAN tier names that are to
        be extracted. For example, if you want to extract from tiers
        "xv-Jane" and "xv-Mark", then tier_prefixes = ["xv"] would do the job.

Returns:
    A list of Utterance objects.
[ "Returns", "the", "utterances", "found", "in", "ELAN", "files", "in", "a", "directory", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/elan.py#L116-L142
train
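A sketch of calling the directory-level variant; the corpus path and prefix are examples only.

# Hypothetical path; recursively gathers utterances from all .eaf files.
from pathlib import Path

all_utterances = utterances_from_dir(Path("elan_corpus"), tier_prefixes=("xv",))
print("Found {} utterances".format(len(all_utterances)))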
persephone-tools/persephone
persephone/corpus_reader.py
CorpusReader.load_batch
def load_batch(self, fn_batch):
    """ Loads a batch with the given prefixes. Each prefix is the full path
    to the training example minus the extension.
    """

    # TODO Assumes targets are available, which is how it's distinct from
    # utils.load_batch_x(). These functions need to change names to be
    # clearer.

    inverse = list(zip(*fn_batch))
    feat_fn_batch = inverse[0]
    target_fn_batch = inverse[1]

    batch_inputs, batch_inputs_lens = utils.load_batch_x(feat_fn_batch,
                                                         flatten=False)

    batch_targets_list = []
    for targets_path in target_fn_batch:
        with open(targets_path, encoding=ENCODING) as targets_f:
            target_indices = self.corpus.labels_to_indices(targets_f.readline().split())
            batch_targets_list.append(target_indices)

    batch_targets = utils.target_list_to_sparse_tensor(batch_targets_list)

    return batch_inputs, batch_inputs_lens, batch_targets
python
def load_batch(self, fn_batch):
    """ Loads a batch with the given prefixes. Each prefix is the full path
    to the training example minus the extension.
    """

    # TODO Assumes targets are available, which is how it's distinct from
    # utils.load_batch_x(). These functions need to change names to be
    # clearer.

    inverse = list(zip(*fn_batch))
    feat_fn_batch = inverse[0]
    target_fn_batch = inverse[1]

    batch_inputs, batch_inputs_lens = utils.load_batch_x(feat_fn_batch,
                                                         flatten=False)

    batch_targets_list = []
    for targets_path in target_fn_batch:
        with open(targets_path, encoding=ENCODING) as targets_f:
            target_indices = self.corpus.labels_to_indices(targets_f.readline().split())
            batch_targets_list.append(target_indices)

    batch_targets = utils.target_list_to_sparse_tensor(batch_targets_list)

    return batch_inputs, batch_inputs_lens, batch_targets
[ "def", "load_batch", "(", "self", ",", "fn_batch", ")", ":", "# TODO Assumes targets are available, which is how its distinct from", "# utils.load_batch_x(). These functions need to change names to be", "# clearer.", "inverse", "=", "list", "(", "zip", "(", "*", "fn_batch", ")", ")", "feat_fn_batch", "=", "inverse", "[", "0", "]", "target_fn_batch", "=", "inverse", "[", "1", "]", "batch_inputs", ",", "batch_inputs_lens", "=", "utils", ".", "load_batch_x", "(", "feat_fn_batch", ",", "flatten", "=", "False", ")", "batch_targets_list", "=", "[", "]", "for", "targets_path", "in", "target_fn_batch", ":", "with", "open", "(", "targets_path", ",", "encoding", "=", "ENCODING", ")", "as", "targets_f", ":", "target_indices", "=", "self", ".", "corpus", ".", "labels_to_indices", "(", "targets_f", ".", "readline", "(", ")", ".", "split", "(", ")", ")", "batch_targets_list", ".", "append", "(", "target_indices", ")", "batch_targets", "=", "utils", ".", "target_list_to_sparse_tensor", "(", "batch_targets_list", ")", "return", "batch_inputs", ",", "batch_inputs_lens", ",", "batch_targets" ]
Loads a batch with the given prefixes. Each prefix is the full path to the training example minus the extension.
[ "Loads", "a", "batch", "with", "the", "given", "prefixes", ".", "The", "prefixes", "is", "the", "full", "path", "to", "the", "training", "example", "minus", "the", "extension", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L95-L117
train
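The zip(*fn_batch) idiom above transposes a list of (feature, target) filename pairs into parallel tuples; a self-contained illustration with made-up filenames:

# Made-up filenames, illustrating the transpose performed in load_batch.
fn_batch = [("b1.fbank.npy", "b1.phonemes"),
            ("b2.fbank.npy", "b2.phonemes")]
feat_fns, target_fns = list(zip(*fn_batch))
assert feat_fns == ("b1.fbank.npy", "b2.fbank.npy")
assert target_fns == ("b1.phonemes", "b2.phonemes")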
persephone-tools/persephone
persephone/corpus_reader.py
CorpusReader.train_batch_gen
def train_batch_gen(self) -> Iterator:
    """ Returns a generator that outputs batches in the training data."""

    if len(self.train_fns) == 0:
        raise PersephoneException("""No training data available; cannot
                                  generate training batches.""")

    # Create batches of batch_size and shuffle them.
    fn_batches = self.make_batches(self.train_fns)

    if self.rand:
        random.shuffle(fn_batches)

    for fn_batch in fn_batches:
        logger.debug("Batch of training filenames: %s",
                     pprint.pformat(fn_batch))
        yield self.load_batch(fn_batch)
python
def train_batch_gen(self) -> Iterator:
    """ Returns a generator that outputs batches in the training data."""

    if len(self.train_fns) == 0:
        raise PersephoneException("""No training data available; cannot
                                  generate training batches.""")

    # Create batches of batch_size and shuffle them.
    fn_batches = self.make_batches(self.train_fns)

    if self.rand:
        random.shuffle(fn_batches)

    for fn_batch in fn_batches:
        logger.debug("Batch of training filenames: %s",
                     pprint.pformat(fn_batch))
        yield self.load_batch(fn_batch)
[ "def", "train_batch_gen", "(", "self", ")", "->", "Iterator", ":", "if", "len", "(", "self", ".", "train_fns", ")", "==", "0", ":", "raise", "PersephoneException", "(", "\"\"\"No training data available; cannot\n generate training batches.\"\"\"", ")", "# Create batches of batch_size and shuffle them.", "fn_batches", "=", "self", ".", "make_batches", "(", "self", ".", "train_fns", ")", "if", "self", ".", "rand", ":", "random", ".", "shuffle", "(", "fn_batches", ")", "for", "fn_batch", "in", "fn_batches", ":", "logger", ".", "debug", "(", "\"Batch of training filenames: %s\"", ",", "pprint", ".", "pformat", "(", "fn_batch", ")", ")", "yield", "self", ".", "load_batch", "(", "fn_batch", ")", "else", ":", "raise", "StopIteration" ]
Returns a generator that outputs batches in the training data.
[ "Returns", "a", "generator", "that", "outputs", "batches", "in", "the", "training", "data", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L125-L144
train
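A sketch of consuming the generator; `reader` is assumed to be a constructed CorpusReader, and `model.train_step` is a hypothetical stand-in for a training update.

# Hypothetical training loop; `reader` and `model.train_step` are assumptions.
num_epochs = 10
for epoch in range(num_epochs):
    for batch_x, batch_x_lens, batch_y in reader.train_batch_gen():
        model.train_step(batch_x, batch_x_lens, batch_y)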
persephone-tools/persephone
persephone/corpus_reader.py
CorpusReader.valid_batch
def valid_batch(self):
    """ Returns a single batch with all the validation cases."""

    valid_fns = list(zip(*self.corpus.get_valid_fns()))
    return self.load_batch(valid_fns)
python
def valid_batch(self):
    """ Returns a single batch with all the validation cases."""

    valid_fns = list(zip(*self.corpus.get_valid_fns()))
    return self.load_batch(valid_fns)
[ "def", "valid_batch", "(", "self", ")", ":", "valid_fns", "=", "list", "(", "zip", "(", "*", "self", ".", "corpus", ".", "get_valid_fns", "(", ")", ")", ")", "return", "self", ".", "load_batch", "(", "valid_fns", ")" ]
Returns a single batch with all the validation cases.
[ "Returns", "a", "single", "batch", "with", "all", "the", "validation", "cases", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L146-L150
train
persephone-tools/persephone
persephone/corpus_reader.py
CorpusReader.untranscribed_batch_gen
def untranscribed_batch_gen(self):
    """ A batch generator for all the untranscribed data. """

    feat_fns = self.corpus.get_untranscribed_fns()
    fn_batches = self.make_batches(feat_fns)

    for fn_batch in fn_batches:
        batch_inputs, batch_inputs_lens = utils.load_batch_x(fn_batch,
                                                             flatten=False)
        yield batch_inputs, batch_inputs_lens, fn_batch
python
def untranscribed_batch_gen(self):
    """ A batch generator for all the untranscribed data. """

    feat_fns = self.corpus.get_untranscribed_fns()
    fn_batches = self.make_batches(feat_fns)

    for fn_batch in fn_batches:
        batch_inputs, batch_inputs_lens = utils.load_batch_x(fn_batch,
                                                             flatten=False)
        yield batch_inputs, batch_inputs_lens, fn_batch
[ "def", "untranscribed_batch_gen", "(", "self", ")", ":", "feat_fns", "=", "self", ".", "corpus", ".", "get_untranscribed_fns", "(", ")", "fn_batches", "=", "self", ".", "make_batches", "(", "feat_fns", ")", "for", "fn_batch", "in", "fn_batches", ":", "batch_inputs", ",", "batch_inputs_lens", "=", "utils", ".", "load_batch_x", "(", "fn_batch", ",", "flatten", "=", "False", ")", "yield", "batch_inputs", ",", "batch_inputs_lens", ",", "fn_batch" ]
A batch generator for all the untranscribed data.
[ "A", "batch", "generator", "for", "all", "the", "untranscribed", "data", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L158-L167
train
persephone-tools/persephone
persephone/corpus_reader.py
CorpusReader.human_readable_hyp_ref
def human_readable_hyp_ref(self, dense_decoded, dense_y):
    """ Returns a human readable version of the hypothesis for manual
    inspection, along with the reference.
    """

    hyps = []
    refs = []
    for i in range(len(dense_decoded)):
        ref = [phn_i for phn_i in dense_y[i] if phn_i != 0]
        hyp = [phn_i for phn_i in dense_decoded[i] if phn_i != 0]
        ref = self.corpus.indices_to_labels(ref)
        hyp = self.corpus.indices_to_labels(hyp)
        refs.append(ref)
        hyps.append(hyp)

    return hyps, refs
python
def human_readable_hyp_ref(self, dense_decoded, dense_y):
    """ Returns a human readable version of the hypothesis for manual
    inspection, along with the reference.
    """

    hyps = []
    refs = []
    for i in range(len(dense_decoded)):
        ref = [phn_i for phn_i in dense_y[i] if phn_i != 0]
        hyp = [phn_i for phn_i in dense_decoded[i] if phn_i != 0]
        ref = self.corpus.indices_to_labels(ref)
        hyp = self.corpus.indices_to_labels(hyp)
        refs.append(ref)
        hyps.append(hyp)

    return hyps, refs
[ "def", "human_readable_hyp_ref", "(", "self", ",", "dense_decoded", ",", "dense_y", ")", ":", "hyps", "=", "[", "]", "refs", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "dense_decoded", ")", ")", ":", "ref", "=", "[", "phn_i", "for", "phn_i", "in", "dense_y", "[", "i", "]", "if", "phn_i", "!=", "0", "]", "hyp", "=", "[", "phn_i", "for", "phn_i", "in", "dense_decoded", "[", "i", "]", "if", "phn_i", "!=", "0", "]", "ref", "=", "self", ".", "corpus", ".", "indices_to_labels", "(", "ref", ")", "hyp", "=", "self", ".", "corpus", ".", "indices_to_labels", "(", "hyp", ")", "refs", ".", "append", "(", "ref", ")", "hyps", ".", "append", "(", "hyp", ")", "return", "hyps", ",", "refs" ]
Returns a human readable version of the hypothesis for manual inspection, along with the reference.
[ "Returns", "a", "human", "readable", "version", "of", "the", "hypothesis", "for", "manual", "inspection", "along", "with", "the", "reference", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L169-L184
train
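Both loops above strip index 0 (taken to be a padding index) before mapping indices back to labels; a small self-contained illustration:

# Illustration of the zero-filtering above (0 assumed to be the padding index).
dense_y = [[3, 7, 0, 0], [2, 0, 0, 0]]
stripped = [[phn_i for phn_i in row if phn_i != 0] for row in dense_y]
assert stripped == [[3, 7], [2]]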
persephone-tools/persephone
persephone/corpus_reader.py
CorpusReader.human_readable
def human_readable(self, dense_repr: Sequence[Sequence[int]]) -> List[List[str]]:
    """ Returns a human readable version of a dense representation of
    either the hypothesis or the reference to facilitate simple manual
    inspection.
    """

    transcripts = []
    for dense_r in dense_repr:
        non_empty_phonemes = [phn_i for phn_i in dense_r if phn_i != 0]
        transcript = self.corpus.indices_to_labels(non_empty_phonemes)
        transcripts.append(transcript)

    return transcripts
python
def human_readable(self, dense_repr: Sequence[Sequence[int]]) -> List[List[str]]:
    """ Returns a human readable version of a dense representation of
    either the hypothesis or the reference to facilitate simple manual
    inspection.
    """

    transcripts = []
    for dense_r in dense_repr:
        non_empty_phonemes = [phn_i for phn_i in dense_r if phn_i != 0]
        transcript = self.corpus.indices_to_labels(non_empty_phonemes)
        transcripts.append(transcript)

    return transcripts
[ "def", "human_readable", "(", "self", ",", "dense_repr", ":", "Sequence", "[", "Sequence", "[", "int", "]", "]", ")", "->", "List", "[", "List", "[", "str", "]", "]", ":", "transcripts", "=", "[", "]", "for", "dense_r", "in", "dense_repr", ":", "non_empty_phonemes", "=", "[", "phn_i", "for", "phn_i", "in", "dense_r", "if", "phn_i", "!=", "0", "]", "transcript", "=", "self", ".", "corpus", ".", "indices_to_labels", "(", "non_empty_phonemes", ")", "transcripts", ".", "append", "(", "transcript", ")", "return", "transcripts" ]
Returns a human readable version of a dense representation of either the hypothesis or the reference to facilitate simple manual inspection.
[ "Returns", "a", "human", "readable", "version", "of", "a", "dense", "representation", "of", "either", "or", "reference", "to", "facilitate", "simple", "manual", "inspection", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L186-L197
train
persephone-tools/persephone
persephone/corpus_reader.py
CorpusReader.calc_time
def calc_time(self) -> None:
    """ Prints statistics about the total duration of recordings in the
    corpus.
    """

    def get_number_of_frames(feat_fns):
        """ fns: A list of numpy files which contain a number of feature
        frames. """

        total = 0
        for feat_fn in feat_fns:
            num_frames = len(np.load(feat_fn))
            total += num_frames
        return total

    def numframes_to_minutes(num_frames):
        # TODO Assumes 10ms strides for the frames. This should generalize to
        # different frame stride widths, as should feature preparation.
        minutes = ((num_frames*10)/1000)/60
        return minutes

    total_frames = 0

    train_fns = [train_fn[0] for train_fn in self.train_fns]
    num_train_frames = get_number_of_frames(train_fns)
    total_frames += num_train_frames
    num_valid_frames = get_number_of_frames(self.corpus.get_valid_fns()[0])
    total_frames += num_valid_frames
    num_test_frames = get_number_of_frames(self.corpus.get_test_fns()[0])
    total_frames += num_test_frames

    print("Train duration: %0.3f" % numframes_to_minutes(num_train_frames))
    print("Validation duration: %0.3f" % numframes_to_minutes(num_valid_frames))
    print("Test duration: %0.3f" % numframes_to_minutes(num_test_frames))
    print("Total duration: %0.3f" % numframes_to_minutes(total_frames))
python
def calc_time(self) -> None:
    """ Prints statistics about the total duration of recordings in the
    corpus.
    """

    def get_number_of_frames(feat_fns):
        """ fns: A list of numpy files which contain a number of feature
        frames. """

        total = 0
        for feat_fn in feat_fns:
            num_frames = len(np.load(feat_fn))
            total += num_frames
        return total

    def numframes_to_minutes(num_frames):
        # TODO Assumes 10ms strides for the frames. This should generalize to
        # different frame stride widths, as should feature preparation.
        minutes = ((num_frames*10)/1000)/60
        return minutes

    total_frames = 0

    train_fns = [train_fn[0] for train_fn in self.train_fns]
    num_train_frames = get_number_of_frames(train_fns)
    total_frames += num_train_frames
    num_valid_frames = get_number_of_frames(self.corpus.get_valid_fns()[0])
    total_frames += num_valid_frames
    num_test_frames = get_number_of_frames(self.corpus.get_test_fns()[0])
    total_frames += num_test_frames

    print("Train duration: %0.3f" % numframes_to_minutes(num_train_frames))
    print("Validation duration: %0.3f" % numframes_to_minutes(num_valid_frames))
    print("Test duration: %0.3f" % numframes_to_minutes(num_test_frames))
    print("Total duration: %0.3f" % numframes_to_minutes(total_frames))
[ "def", "calc_time", "(", "self", ")", "->", "None", ":", "def", "get_number_of_frames", "(", "feat_fns", ")", ":", "\"\"\" fns: A list of numpy files which contain a number of feature\n frames. \"\"\"", "total", "=", "0", "for", "feat_fn", "in", "feat_fns", ":", "num_frames", "=", "len", "(", "np", ".", "load", "(", "feat_fn", ")", ")", "total", "+=", "num_frames", "return", "total", "def", "numframes_to_minutes", "(", "num_frames", ")", ":", "# TODO Assumes 10ms strides for the frames. This should generalize to", "# different frame stride widths, as should feature preparation.", "minutes", "=", "(", "(", "num_frames", "*", "10", ")", "/", "1000", ")", "/", "60", "return", "minutes", "total_frames", "=", "0", "train_fns", "=", "[", "train_fn", "[", "0", "]", "for", "train_fn", "in", "self", ".", "train_fns", "]", "num_train_frames", "=", "get_number_of_frames", "(", "train_fns", ")", "total_frames", "+=", "num_train_frames", "num_valid_frames", "=", "get_number_of_frames", "(", "self", ".", "corpus", ".", "get_valid_fns", "(", ")", "[", "0", "]", ")", "total_frames", "+=", "num_valid_frames", "num_test_frames", "=", "get_number_of_frames", "(", "self", ".", "corpus", ".", "get_test_fns", "(", ")", "[", "0", "]", ")", "total_frames", "+=", "num_test_frames", "print", "(", "\"Train duration: %0.3f\"", "%", "numframes_to_minutes", "(", "num_train_frames", ")", ")", "print", "(", "\"Validation duration: %0.3f\"", "%", "numframes_to_minutes", "(", "num_valid_frames", ")", ")", "print", "(", "\"Test duration: %0.3f\"", "%", "numframes_to_minutes", "(", "num_test_frames", ")", ")", "print", "(", "\"Total duration: %0.3f\"", "%", "numframes_to_minutes", "(", "total_frames", ")", ")" ]
Prints statistics about the total duration of recordings in the corpus.
[ "Prints", "statistics", "about", "the", "the", "total", "duration", "of", "recordings", "in", "the", "corpus", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L205-L241
train
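A worked instance of the 10ms frame-stride arithmetic in numframes_to_minutes:

# 60,000 frames at a 10 ms stride: 600,000 ms = 600 s = 10 minutes.
num_frames = 60000
minutes = ((num_frames * 10) / 1000) / 60
assert minutes == 10.0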
persephone-tools/persephone
persephone/rnn_ctc.py
lstm_cell
def lstm_cell(hidden_size):
    """ Wrapper function to create an LSTM cell. """

    return tf.contrib.rnn.LSTMCell(
        hidden_size,
        use_peepholes=True,
        state_is_tuple=True)
python
def lstm_cell(hidden_size):
    """ Wrapper function to create an LSTM cell. """

    return tf.contrib.rnn.LSTMCell(
        hidden_size,
        use_peepholes=True,
        state_is_tuple=True)
[ "def", "lstm_cell", "(", "hidden_size", ")", ":", "return", "tf", ".", "contrib", ".", "rnn", ".", "LSTMCell", "(", "hidden_size", ",", "use_peepholes", "=", "True", ",", "state_is_tuple", "=", "True", ")" ]
Wrapper function to create an LSTM cell.
[ "Wrapper", "function", "to", "create", "an", "LSTM", "cell", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/rnn_ctc.py#L12-L16
train
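A sketch of how such a cell is typically stacked into a multi-layer network under the same TF 1.x contrib API; the layer count and hidden size are arbitrary examples, not values taken from this codebase.

# Sketch only (TF 1.x contrib API, matching lstm_cell above); sizes are examples.
import tensorflow as tf

stacked = tf.contrib.rnn.MultiRNNCell(
    [lstm_cell(250) for _ in range(3)],
    state_is_tuple=True)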
persephone-tools/persephone
persephone/rnn_ctc.py
Model.write_desc
def write_desc(self) -> None:
    """ Writes a description of the model to the exp_dir. """

    path = os.path.join(self.exp_dir, "model_description.txt")
    with open(path, "w") as desc_f:
        for key, val in self.__dict__.items():
            print("%s=%s" % (key, val), file=desc_f)

    import json
    json_path = os.path.join(self.exp_dir, "model_description.json")
    desc = { } #type: Dict[str, Any]
    # For use in decoding from a saved model
    desc["topology"] = {
        "batch_x_name" : self.batch_x.name, #type: ignore
        "batch_x_lens_name" : self.batch_x_lens.name, #type: ignore
        "dense_decoded_name" : self.dense_decoded.name #type: ignore
    }
    desc["model_type"] = str(self.__class__)
    for key, val in self.__dict__.items():
        if isinstance(val, int):
            desc[str(key)] = val
        elif isinstance(val, tf.Tensor):
            desc[key] = {
                "type": "tf.Tensor",
                "name": val.name, #type: ignore
                "shape": str(val.shape), #type: ignore
                "dtype" : str(val.dtype), #type: ignore
                "value" : str(val),
            }
        elif isinstance(val, tf.SparseTensor): #type: ignore
            desc[key] = {
                "type": "tf.SparseTensor",
                "value": str(val), #type: ignore
            }
        else:
            desc[str(key)] = str(val)
    with open(json_path, "w") as json_desc_f:
        json.dump(desc, json_desc_f, skipkeys=True)
python
def write_desc(self) -> None:
    """ Writes a description of the model to the exp_dir. """

    path = os.path.join(self.exp_dir, "model_description.txt")
    with open(path, "w") as desc_f:
        for key, val in self.__dict__.items():
            print("%s=%s" % (key, val), file=desc_f)

    import json
    json_path = os.path.join(self.exp_dir, "model_description.json")
    desc = { } #type: Dict[str, Any]
    # For use in decoding from a saved model
    desc["topology"] = {
        "batch_x_name" : self.batch_x.name, #type: ignore
        "batch_x_lens_name" : self.batch_x_lens.name, #type: ignore
        "dense_decoded_name" : self.dense_decoded.name #type: ignore
    }
    desc["model_type"] = str(self.__class__)
    for key, val in self.__dict__.items():
        if isinstance(val, int):
            desc[str(key)] = val
        elif isinstance(val, tf.Tensor):
            desc[key] = {
                "type": "tf.Tensor",
                "name": val.name, #type: ignore
                "shape": str(val.shape), #type: ignore
                "dtype" : str(val.dtype), #type: ignore
                "value" : str(val),
            }
        elif isinstance(val, tf.SparseTensor): #type: ignore
            desc[key] = {
                "type": "tf.SparseTensor",
                "value": str(val), #type: ignore
            }
        else:
            desc[str(key)] = str(val)
    with open(json_path, "w") as json_desc_f:
        json.dump(desc, json_desc_f, skipkeys=True)
[ "def", "write_desc", "(", "self", ")", "->", "None", ":", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "exp_dir", ",", "\"model_description.txt\"", ")", "with", "open", "(", "path", ",", "\"w\"", ")", "as", "desc_f", ":", "for", "key", ",", "val", "in", "self", ".", "__dict__", ".", "items", "(", ")", ":", "print", "(", "\"%s=%s\"", "%", "(", "key", ",", "val", ")", ",", "file", "=", "desc_f", ")", "import", "json", "json_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "exp_dir", ",", "\"model_description.json\"", ")", "desc", "=", "{", "}", "#type: Dict[str, Any]", "# For use in decoding from a saved model", "desc", "[", "\"topology\"", "]", "=", "{", "\"batch_x_name\"", ":", "self", ".", "batch_x", ".", "name", ",", "#type: ignore", "\"batch_x_lens_name\"", ":", "self", ".", "batch_x_lens", ".", "name", ",", "#type: ignore", "\"dense_decoded_name\"", ":", "self", ".", "dense_decoded", ".", "name", "#type: ignore", "}", "desc", "[", "\"model_type\"", "]", "=", "str", "(", "self", ".", "__class__", ")", "for", "key", ",", "val", "in", "self", ".", "__dict__", ".", "items", "(", ")", ":", "if", "isinstance", "(", "val", ",", "int", ")", ":", "desc", "[", "str", "(", "key", ")", "]", "=", "val", "elif", "isinstance", "(", "val", ",", "tf", ".", "Tensor", ")", ":", "desc", "[", "key", "]", "=", "{", "\"type\"", ":", "\"tf.Tensor\"", ",", "\"name\"", ":", "val", ".", "name", ",", "#type: ignore", "\"shape\"", ":", "str", "(", "val", ".", "shape", ")", ",", "#type: ignore", "\"dtype\"", ":", "str", "(", "val", ".", "dtype", ")", ",", "#type: ignore", "\"value\"", ":", "str", "(", "val", ")", ",", "}", "elif", "isinstance", "(", "val", ",", "tf", ".", "SparseTensor", ")", ":", "#type: ignore", "desc", "[", "key", "]", "=", "{", "\"type\"", ":", "\"tf.SparseTensor\"", ",", "\"value\"", ":", "str", "(", "val", ")", ",", "#type: ignore", "}", "else", ":", "desc", "[", "str", "(", "key", ")", "]", "=", "str", "(", "val", ")", "with", "open", "(", "json_path", ",", "\"w\"", ")", "as", "json_desc_f", ":", "json", ".", "dump", "(", "desc", ",", "json_desc_f", ",", "skipkeys", "=", "True", ")" ]
Writes a description of the model to the exp_dir.
[ "Writes", "a", "description", "of", "the", "model", "to", "the", "exp_dir", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/rnn_ctc.py#L21-L58
train
persephone-tools/persephone
persephone/preprocess/feat_extract.py
empty_wav
def empty_wav(wav_path: Union[Path, str]) -> bool:
    """Check if a wav contains data"""
    with wave.open(str(wav_path), 'rb') as wav_f:
        return wav_f.getnframes() == 0
python
def empty_wav(wav_path: Union[Path, str]) -> bool:
    """Check if a wav contains data"""
    with wave.open(str(wav_path), 'rb') as wav_f:
        return wav_f.getnframes() == 0
[ "def", "empty_wav", "(", "wav_path", ":", "Union", "[", "Path", ",", "str", "]", ")", "->", "bool", ":", "with", "wave", ".", "open", "(", "str", "(", "wav_path", ")", ",", "'rb'", ")", "as", "wav_f", ":", "return", "wav_f", ".", "getnframes", "(", ")", "==", "0" ]
Check if a wav contains data
[ "Check", "if", "a", "wav", "contains", "data" ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L19-L22
train
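A sketch of screening a directory with this check before feature extraction; the path is hypothetical.

# Hypothetical path; keeps only WAVs that actually contain frames.
from pathlib import Path

usable = [p for p in Path("wav").glob("*.wav") if not empty_wav(p)]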
persephone-tools/persephone
persephone/preprocess/feat_extract.py
extract_energy
def extract_energy(rate, sig):
    """ Extracts the energy of frames. """

    mfcc = python_speech_features.mfcc(sig, rate, appendEnergy=True)
    energy_row_vec = mfcc[:, 0]
    energy_col_vec = energy_row_vec[:, np.newaxis]
    return energy_col_vec
python
def extract_energy(rate, sig):
    """ Extracts the energy of frames. """

    mfcc = python_speech_features.mfcc(sig, rate, appendEnergy=True)
    energy_row_vec = mfcc[:, 0]
    energy_col_vec = energy_row_vec[:, np.newaxis]
    return energy_col_vec
[ "def", "extract_energy", "(", "rate", ",", "sig", ")", ":", "mfcc", "=", "python_speech_features", ".", "mfcc", "(", "sig", ",", "rate", ",", "appendEnergy", "=", "True", ")", "energy_row_vec", "=", "mfcc", "[", ":", ",", "0", "]", "energy_col_vec", "=", "energy_row_vec", "[", ":", ",", "np", ".", "newaxis", "]", "return", "energy_col_vec" ]
Extracts the energy of frames.
[ "Extracts", "the", "energy", "of", "frames", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L25-L31
train
persephone-tools/persephone
persephone/preprocess/feat_extract.py
fbank
def fbank(wav_path, flat=True):
    """ Currently grabs log Mel filterbank, deltas and double deltas."""

    (rate, sig) = wav.read(wav_path)
    if len(sig) == 0:
        logger.warning("Empty wav: {}".format(wav_path))
    fbank_feat = python_speech_features.logfbank(sig, rate, nfilt=40)
    energy = extract_energy(rate, sig)
    feat = np.hstack([energy, fbank_feat])
    delta_feat = python_speech_features.delta(feat, 2)
    delta_delta_feat = python_speech_features.delta(delta_feat, 2)
    all_feats = [feat, delta_feat, delta_delta_feat]
    if not flat:
        all_feats = np.array(all_feats)
        # Make time the first dimension for easy length normalization padding
        # later.
        all_feats = np.swapaxes(all_feats, 0, 1)
        all_feats = np.swapaxes(all_feats, 1, 2)
    else:
        all_feats = np.concatenate(all_feats, axis=1)

    # Log Mel Filterbank, with delta, and double delta
    feat_fn = wav_path[:-3] + "fbank.npy"
    np.save(feat_fn, all_feats)
python
def fbank(wav_path, flat=True):
    """ Currently grabs log Mel filterbank, deltas and double deltas."""

    (rate, sig) = wav.read(wav_path)
    if len(sig) == 0:
        logger.warning("Empty wav: {}".format(wav_path))
    fbank_feat = python_speech_features.logfbank(sig, rate, nfilt=40)
    energy = extract_energy(rate, sig)
    feat = np.hstack([energy, fbank_feat])
    delta_feat = python_speech_features.delta(feat, 2)
    delta_delta_feat = python_speech_features.delta(delta_feat, 2)
    all_feats = [feat, delta_feat, delta_delta_feat]
    if not flat:
        all_feats = np.array(all_feats)
        # Make time the first dimension for easy length normalization padding
        # later.
        all_feats = np.swapaxes(all_feats, 0, 1)
        all_feats = np.swapaxes(all_feats, 1, 2)
    else:
        all_feats = np.concatenate(all_feats, axis=1)

    # Log Mel Filterbank, with delta, and double delta
    feat_fn = wav_path[:-3] + "fbank.npy"
    np.save(feat_fn, all_feats)
[ "def", "fbank", "(", "wav_path", ",", "flat", "=", "True", ")", ":", "(", "rate", ",", "sig", ")", "=", "wav", ".", "read", "(", "wav_path", ")", "if", "len", "(", "sig", ")", "==", "0", ":", "logger", ".", "warning", "(", "\"Empty wav: {}\"", ".", "format", "(", "wav_path", ")", ")", "fbank_feat", "=", "python_speech_features", ".", "logfbank", "(", "sig", ",", "rate", ",", "nfilt", "=", "40", ")", "energy", "=", "extract_energy", "(", "rate", ",", "sig", ")", "feat", "=", "np", ".", "hstack", "(", "[", "energy", ",", "fbank_feat", "]", ")", "delta_feat", "=", "python_speech_features", ".", "delta", "(", "feat", ",", "2", ")", "delta_delta_feat", "=", "python_speech_features", ".", "delta", "(", "delta_feat", ",", "2", ")", "all_feats", "=", "[", "feat", ",", "delta_feat", ",", "delta_delta_feat", "]", "if", "not", "flat", ":", "all_feats", "=", "np", ".", "array", "(", "all_feats", ")", "# Make time the first dimension for easy length normalization padding", "# later.", "all_feats", "=", "np", ".", "swapaxes", "(", "all_feats", ",", "0", ",", "1", ")", "all_feats", "=", "np", ".", "swapaxes", "(", "all_feats", ",", "1", ",", "2", ")", "else", ":", "all_feats", "=", "np", ".", "concatenate", "(", "all_feats", ",", "axis", "=", "1", ")", "# Log Mel Filterbank, with delta, and double delta", "feat_fn", "=", "wav_path", "[", ":", "-", "3", "]", "+", "\"fbank.npy\"", "np", ".", "save", "(", "feat_fn", ",", "all_feats", ")" ]
Currently grabs log Mel filterbank, deltas and double deltas.
[ "Currently", "grabs", "log", "Mel", "filterbank", "deltas", "and", "double", "deltas", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L33-L56
train
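In the flat layout above, each frame carries 41 base features (the prepended energy column plus 40 log Mel filterbanks), tripled by the delta and double-delta stacks; a shape sketch with stand-in arrays:

# Shape sketch for the flat fbank layout: (frames, 41) tripled to (frames, 123).
import numpy as np

feat = np.zeros((100, 41))        # energy column + 40 log Mel filterbanks
delta = np.zeros_like(feat)       # stand-in for the delta features
delta_delta = np.zeros_like(feat) # stand-in for the double-delta features
flat = np.concatenate([feat, delta, delta_delta], axis=1)
assert flat.shape == (100, 123)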
persephone-tools/persephone
persephone/preprocess/feat_extract.py
mfcc
def mfcc(wav_path):
    """ Grabs MFCC features with energy and derivatives. """

    (rate, sig) = wav.read(wav_path)
    feat = python_speech_features.mfcc(sig, rate, appendEnergy=True)
    delta_feat = python_speech_features.delta(feat, 2)
    all_feats = [feat, delta_feat]
    all_feats = np.array(all_feats)
    # Make time the first dimension for easy length normalization padding later.
    all_feats = np.swapaxes(all_feats, 0, 1)
    all_feats = np.swapaxes(all_feats, 1, 2)

    feat_fn = wav_path[:-3] + "mfcc13_d.npy"
    np.save(feat_fn, all_feats)
python
def mfcc(wav_path):
    """ Grabs MFCC features with energy and derivatives. """

    (rate, sig) = wav.read(wav_path)
    feat = python_speech_features.mfcc(sig, rate, appendEnergy=True)
    delta_feat = python_speech_features.delta(feat, 2)
    all_feats = [feat, delta_feat]
    all_feats = np.array(all_feats)
    # Make time the first dimension for easy length normalization padding later.
    all_feats = np.swapaxes(all_feats, 0, 1)
    all_feats = np.swapaxes(all_feats, 1, 2)

    feat_fn = wav_path[:-3] + "mfcc13_d.npy"
    np.save(feat_fn, all_feats)
[ "def", "mfcc", "(", "wav_path", ")", ":", "(", "rate", ",", "sig", ")", "=", "wav", ".", "read", "(", "wav_path", ")", "feat", "=", "python_speech_features", ".", "mfcc", "(", "sig", ",", "rate", ",", "appendEnergy", "=", "True", ")", "delta_feat", "=", "python_speech_features", ".", "delta", "(", "feat", ",", "2", ")", "all_feats", "=", "[", "feat", ",", "delta_feat", "]", "all_feats", "=", "np", ".", "array", "(", "all_feats", ")", "# Make time the first dimension for easy length normalization padding later.", "all_feats", "=", "np", ".", "swapaxes", "(", "all_feats", ",", "0", ",", "1", ")", "all_feats", "=", "np", ".", "swapaxes", "(", "all_feats", ",", "1", ",", "2", ")", "feat_fn", "=", "wav_path", "[", ":", "-", "3", "]", "+", "\"mfcc13_d.npy\"", "np", ".", "save", "(", "feat_fn", ",", "all_feats", ")" ]
Grabs MFCC features with energy and derivatives.
[ "Grabs", "MFCC", "features", "with", "energy", "and", "derivates", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L58-L71
train
persephone-tools/persephone
persephone/preprocess/feat_extract.py
from_dir
def from_dir(dirpath: Path, feat_type: str) -> None:
    """ Performs feature extraction from the WAV files in a directory.

    Args:
        dirpath: A `Path` to the directory where the WAV files reside.
        feat_type: The type of features that are being used.
    """

    logger.info("Extracting features from directory {}".format(dirpath))
    dirname = str(dirpath)

    def all_wavs_processed() -> bool:
        """
        True if all wavs in the directory have corresponding numpy feature
        file; False otherwise.
        """

        for fn in os.listdir(dirname):
            prefix, ext = os.path.splitext(fn)
            if ext == ".wav":
                if not os.path.exists(
                        os.path.join(dirname, "%s.%s.npy" % (prefix, feat_type))):
                    return False
        return True

    if all_wavs_processed():
        # Then nothing needs to be done here
        logger.info("All WAV files already preprocessed")
        return

    # Otherwise, go on and process everything...

    # If pitch features are needed as part of this, extract them
    if feat_type == "pitch" or feat_type == "fbank_and_pitch":
        kaldi_pitch(dirname, dirname)

    # Then apply file-wise feature extraction
    for filename in os.listdir(dirname):
        logger.info("Preparing %s features for %s", feat_type, filename)
        path = os.path.join(dirname, filename)
        if path.endswith(".wav"):
            if empty_wav(path):
                raise PersephoneException("Can't extract features for {} since it is an empty WAV file. Remove it from the corpus.".format(path))

            if feat_type == "fbank":
                fbank(path)
            elif feat_type == "fbank_and_pitch":
                fbank(path)
                prefix = os.path.splitext(filename)[0]
                combine_fbank_and_pitch(dirname, prefix)
            elif feat_type == "pitch":
                # Already extracted pitch at the start of this function.
                pass
            elif feat_type == "mfcc13_d":
                mfcc(path)
            else:
                logger.warning("Feature type not found: %s", feat_type)
                raise PersephoneException("Feature type not found: %s" % feat_type)
python
def from_dir(dirpath: Path, feat_type: str) -> None:
    """ Performs feature extraction from the WAV files in a directory.

    Args:
        dirpath: A `Path` to the directory where the WAV files reside.
        feat_type: The type of features that are being used.
    """

    logger.info("Extracting features from directory {}".format(dirpath))
    dirname = str(dirpath)

    def all_wavs_processed() -> bool:
        """
        True if all wavs in the directory have corresponding numpy feature
        file; False otherwise.
        """

        for fn in os.listdir(dirname):
            prefix, ext = os.path.splitext(fn)
            if ext == ".wav":
                if not os.path.exists(
                        os.path.join(dirname, "%s.%s.npy" % (prefix, feat_type))):
                    return False
        return True

    if all_wavs_processed():
        # Then nothing needs to be done here
        logger.info("All WAV files already preprocessed")
        return

    # Otherwise, go on and process everything...

    # If pitch features are needed as part of this, extract them
    if feat_type == "pitch" or feat_type == "fbank_and_pitch":
        kaldi_pitch(dirname, dirname)

    # Then apply file-wise feature extraction
    for filename in os.listdir(dirname):
        logger.info("Preparing %s features for %s", feat_type, filename)
        path = os.path.join(dirname, filename)
        if path.endswith(".wav"):
            if empty_wav(path):
                raise PersephoneException("Can't extract features for {} since it is an empty WAV file. Remove it from the corpus.".format(path))

            if feat_type == "fbank":
                fbank(path)
            elif feat_type == "fbank_and_pitch":
                fbank(path)
                prefix = os.path.splitext(filename)[0]
                combine_fbank_and_pitch(dirname, prefix)
            elif feat_type == "pitch":
                # Already extracted pitch at the start of this function.
                pass
            elif feat_type == "mfcc13_d":
                mfcc(path)
            else:
                logger.warning("Feature type not found: %s", feat_type)
                raise PersephoneException("Feature type not found: %s" % feat_type)
[ "def", "from_dir", "(", "dirpath", ":", "Path", ",", "feat_type", ":", "str", ")", "->", "None", ":", "logger", ".", "info", "(", "\"Extracting features from directory {}\"", ".", "format", "(", "dirpath", ")", ")", "dirname", "=", "str", "(", "dirpath", ")", "def", "all_wavs_processed", "(", ")", "->", "bool", ":", "\"\"\"\n True if all wavs in the directory have corresponding numpy feature\n file; False otherwise.\n \"\"\"", "for", "fn", "in", "os", ".", "listdir", "(", "dirname", ")", ":", "prefix", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "fn", ")", "if", "ext", "==", "\".wav\"", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "\"%s.%s.npy\"", "%", "(", "prefix", ",", "feat_type", ")", ")", ")", ":", "return", "False", "return", "True", "if", "all_wavs_processed", "(", ")", ":", "# Then nothing needs to be done here", "logger", ".", "info", "(", "\"All WAV files already preprocessed\"", ")", "return", "# Otherwise, go on and process everything...", "# If pitch features are needed as part of this, extract them", "if", "feat_type", "==", "\"pitch\"", "or", "feat_type", "==", "\"fbank_and_pitch\"", ":", "kaldi_pitch", "(", "dirname", ",", "dirname", ")", "# Then apply file-wise feature extraction", "for", "filename", "in", "os", ".", "listdir", "(", "dirname", ")", ":", "logger", ".", "info", "(", "\"Preparing %s features for %s\"", ",", "feat_type", ",", "filename", ")", "path", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "filename", ")", "if", "path", ".", "endswith", "(", "\".wav\"", ")", ":", "if", "empty_wav", "(", "path", ")", ":", "raise", "PersephoneException", "(", "\"Can't extract features for {} since it is an empty WAV file. Remove it from the corpus.\"", ".", "format", "(", "path", ")", ")", "if", "feat_type", "==", "\"fbank\"", ":", "fbank", "(", "path", ")", "elif", "feat_type", "==", "\"fbank_and_pitch\"", ":", "fbank", "(", "path", ")", "prefix", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "0", "]", "combine_fbank_and_pitch", "(", "dirname", ",", "prefix", ")", "elif", "feat_type", "==", "\"pitch\"", ":", "# Already extracted pitch at the start of this function.", "pass", "elif", "feat_type", "==", "\"mfcc13_d\"", ":", "mfcc", "(", "path", ")", "else", ":", "logger", ".", "warning", "(", "\"Feature type not found: %s\"", ",", "feat_type", ")", "raise", "PersephoneException", "(", "\"Feature type not found: %s\"", "%", "feat_type", ")" ]
Performs feature extraction from the WAV files in a directory.

Args:
    dirpath: A `Path` to the directory where the WAV files reside.
    feat_type: The type of features that are being used.
[ "Performs", "feature", "extraction", "from", "the", "WAV", "files", "in", "a", "directory", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L117-L173
train
persephone-tools/persephone
persephone/preprocess/feat_extract.py
convert_wav
def convert_wav(org_wav_fn: Path, tgt_wav_fn: Path) -> None:
    """ Converts the wav into a 16bit mono 16000Hz wav.

    Args:
        org_wav_fn: A `Path` to the original wave file
        tgt_wav_fn: The `Path` to output the processed wave file
    """

    if not org_wav_fn.exists():
        raise FileNotFoundError
    args = [config.FFMPEG_PATH,
            "-i", str(org_wav_fn),
            "-ac", "1", "-ar", "16000",
            str(tgt_wav_fn)]
    subprocess.run(args)
python
def convert_wav(org_wav_fn: Path, tgt_wav_fn: Path) -> None:
    """ Converts the wav into a 16bit mono 16000Hz wav.

    Args:
        org_wav_fn: A `Path` to the original wave file
        tgt_wav_fn: The `Path` to output the processed wave file
    """

    if not org_wav_fn.exists():
        raise FileNotFoundError
    args = [config.FFMPEG_PATH,
            "-i", str(org_wav_fn),
            "-ac", "1", "-ar", "16000",
            str(tgt_wav_fn)]
    subprocess.run(args)
[ "def", "convert_wav", "(", "org_wav_fn", ":", "Path", ",", "tgt_wav_fn", ":", "Path", ")", "->", "None", ":", "if", "not", "org_wav_fn", ".", "exists", "(", ")", ":", "raise", "FileNotFoundError", "args", "=", "[", "config", ".", "FFMPEG_PATH", ",", "\"-i\"", ",", "str", "(", "org_wav_fn", ")", ",", "\"-ac\"", ",", "\"1\"", ",", "\"-ar\"", ",", "\"16000\"", ",", "str", "(", "tgt_wav_fn", ")", "]", "subprocess", ".", "run", "(", "args", ")" ]
Converts the wav into a 16bit mono 16000Hz wav.

Args:
    org_wav_fn: A `Path` to the original wave file
    tgt_wav_fn: The `Path` to output the processed wave file
[ "Converts", "the", "wav", "into", "a", "16bit", "mono", "16000Hz", "wav", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L175-L186
train
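The subprocess call above amounts to a plain ffmpeg invocation; a usage sketch with hypothetical paths:

# Equivalent command line (paths hypothetical):
#   ffmpeg -i original.wav -ac 1 -ar 16000 mono16k.wav
from pathlib import Path

convert_wav(Path("original.wav"), Path("mono16k.wav"))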
persephone-tools/persephone
persephone/preprocess/feat_extract.py
kaldi_pitch
def kaldi_pitch(wav_dir: str, feat_dir: str) -> None:
    """ Extract Kaldi pitch features. Assumes 16k mono wav files."""

    logger.debug("Make wav.scp and pitch.scp files")
    # Make wav.scp and pitch.scp files
    prefixes = []
    for fn in os.listdir(wav_dir):
        prefix, ext = os.path.splitext(fn)
        if ext == ".wav":
            prefixes.append(prefix)

    wav_scp_path = os.path.join(feat_dir, "wavs.scp")
    with open(wav_scp_path, "w") as wav_scp:
        for prefix in prefixes:
            logger.info("Writing wav file: %s", os.path.join(wav_dir, prefix + ".wav"))
            print(prefix, os.path.join(wav_dir, prefix + ".wav"), file=wav_scp)

    pitch_scp_path = os.path.join(feat_dir, "pitch_feats.scp")
    with open(pitch_scp_path, "w") as pitch_scp:
        for prefix in prefixes:
            logger.info("Writing scp file: %s", os.path.join(feat_dir, prefix + ".pitch.txt"))
            print(prefix, os.path.join(feat_dir, prefix + ".pitch.txt"), file=pitch_scp)

    # Call Kaldi pitch feat extraction
    args = [os.path.join(config.KALDI_ROOT, "src/featbin/compute-kaldi-pitch-feats"),
            "scp:%s" % (wav_scp_path),
            "scp,t:%s" % pitch_scp_path]
    logger.info("Extracting pitch features from wavs listed in {}".format(
        wav_scp_path))
    subprocess.run(args)

    # Convert the Kaldi pitch *.txt files to numpy arrays.
    for fn in os.listdir(feat_dir):
        if fn.endswith(".pitch.txt"):
            pitch_feats = []
            with open(os.path.join(feat_dir, fn)) as f:
                for line in f:
                    sp = line.split()
                    if len(sp) > 1:
                        pitch_feats.append([float(sp[0]), float(sp[1])])
            prefix, _ = os.path.splitext(fn)
            out_fn = prefix + ".npy"
            a = np.array(pitch_feats)
            np.save(os.path.join(feat_dir, out_fn), a)
python
def kaldi_pitch(wav_dir: str, feat_dir: str) -> None:
    """ Extract Kaldi pitch features. Assumes 16k mono wav files."""

    logger.debug("Make wav.scp and pitch.scp files")
    # Make wav.scp and pitch.scp files
    prefixes = []
    for fn in os.listdir(wav_dir):
        prefix, ext = os.path.splitext(fn)
        if ext == ".wav":
            prefixes.append(prefix)

    wav_scp_path = os.path.join(feat_dir, "wavs.scp")
    with open(wav_scp_path, "w") as wav_scp:
        for prefix in prefixes:
            logger.info("Writing wav file: %s", os.path.join(wav_dir, prefix + ".wav"))
            print(prefix, os.path.join(wav_dir, prefix + ".wav"), file=wav_scp)

    pitch_scp_path = os.path.join(feat_dir, "pitch_feats.scp")
    with open(pitch_scp_path, "w") as pitch_scp:
        for prefix in prefixes:
            logger.info("Writing scp file: %s", os.path.join(feat_dir, prefix + ".pitch.txt"))
            print(prefix, os.path.join(feat_dir, prefix + ".pitch.txt"), file=pitch_scp)

    # Call Kaldi pitch feat extraction
    args = [os.path.join(config.KALDI_ROOT, "src/featbin/compute-kaldi-pitch-feats"),
            "scp:%s" % (wav_scp_path),
            "scp,t:%s" % pitch_scp_path]
    logger.info("Extracting pitch features from wavs listed in {}".format(
        wav_scp_path))
    subprocess.run(args)

    # Convert the Kaldi pitch *.txt files to numpy arrays.
    for fn in os.listdir(feat_dir):
        if fn.endswith(".pitch.txt"):
            pitch_feats = []
            with open(os.path.join(feat_dir, fn)) as f:
                for line in f:
                    sp = line.split()
                    if len(sp) > 1:
                        pitch_feats.append([float(sp[0]), float(sp[1])])
            prefix, _ = os.path.splitext(fn)
            out_fn = prefix + ".npy"
            a = np.array(pitch_feats)
            np.save(os.path.join(feat_dir, out_fn), a)
[ "def", "kaldi_pitch", "(", "wav_dir", ":", "str", ",", "feat_dir", ":", "str", ")", "->", "None", ":", "logger", ".", "debug", "(", "\"Make wav.scp and pitch.scp files\"", ")", "# Make wav.scp and pitch.scp files", "prefixes", "=", "[", "]", "for", "fn", "in", "os", ".", "listdir", "(", "wav_dir", ")", ":", "prefix", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "fn", ")", "if", "ext", "==", "\".wav\"", ":", "prefixes", ".", "append", "(", "prefix", ")", "wav_scp_path", "=", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "\"wavs.scp\"", ")", "with", "open", "(", "wav_scp_path", ",", "\"w\"", ")", "as", "wav_scp", ":", "for", "prefix", "in", "prefixes", ":", "logger", ".", "info", "(", "\"Writing wav file: %s\"", ",", "os", ".", "path", ".", "join", "(", "wav_dir", ",", "prefix", "+", "\".wav\"", ")", ")", "print", "(", "prefix", ",", "os", ".", "path", ".", "join", "(", "wav_dir", ",", "prefix", "+", "\".wav\"", ")", ",", "file", "=", "wav_scp", ")", "pitch_scp_path", "=", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "\"pitch_feats.scp\"", ")", "with", "open", "(", "pitch_scp_path", ",", "\"w\"", ")", "as", "pitch_scp", ":", "for", "prefix", "in", "prefixes", ":", "logger", ".", "info", "(", "\"Writing scp file: %s\"", ",", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "prefix", "+", "\".pitch.txt\"", ")", ")", "print", "(", "prefix", ",", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "prefix", "+", "\".pitch.txt\"", ")", ",", "file", "=", "pitch_scp", ")", "# Call Kaldi pitch feat extraction", "args", "=", "[", "os", ".", "path", ".", "join", "(", "config", ".", "KALDI_ROOT", ",", "\"src/featbin/compute-kaldi-pitch-feats\"", ")", ",", "\"scp:%s\"", "%", "(", "wav_scp_path", ")", ",", "\"scp,t:%s\"", "%", "pitch_scp_path", "]", "logger", ".", "info", "(", "\"Extracting pitch features from wavs listed in {}\"", ".", "format", "(", "wav_scp_path", ")", ")", "subprocess", ".", "run", "(", "args", ")", "# Convert the Kaldi pitch *.txt files to numpy arrays.", "for", "fn", "in", "os", ".", "listdir", "(", "feat_dir", ")", ":", "if", "fn", ".", "endswith", "(", "\".pitch.txt\"", ")", ":", "pitch_feats", "=", "[", "]", "with", "open", "(", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "fn", ")", ")", "as", "f", ":", "for", "line", "in", "f", ":", "sp", "=", "line", ".", "split", "(", ")", "if", "len", "(", "sp", ")", ">", "1", ":", "pitch_feats", ".", "append", "(", "[", "float", "(", "sp", "[", "0", "]", ")", ",", "float", "(", "sp", "[", "1", "]", ")", "]", ")", "prefix", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "fn", ")", "out_fn", "=", "prefix", "+", "\".npy\"", "a", "=", "np", ".", "array", "(", "pitch_feats", ")", "np", ".", "save", "(", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "out_fn", ")", ",", "a", ")" ]
Extract Kaldi pitch features. Assumes 16k mono wav files.
[ "Extract", "Kaldi", "pitch", "features", ".", "Assumes", "16k", "mono", "wav", "files", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L188-L230
train
persephone-tools/persephone
persephone/experiment.py
get_exp_dir_num
def get_exp_dir_num(parent_dir: str) -> int:
    """ Gets the number of the current experiment directory."""
    return max([int(fn.split(".")[0])
                for fn in os.listdir(parent_dir) if fn.split(".")[0].isdigit()]
                   + [-1])
python
def get_exp_dir_num(parent_dir: str) -> int:
    """ Gets the number of the current experiment directory."""
    return max([int(fn.split(".")[0])
                for fn in os.listdir(parent_dir) if fn.split(".")[0].isdigit()]
                   + [-1])
[ "def", "get_exp_dir_num", "(", "parent_dir", ":", "str", ")", "->", "int", ":", "return", "max", "(", "[", "int", "(", "fn", ".", "split", "(", "\".\"", ")", "[", "0", "]", ")", "for", "fn", "in", "os", ".", "listdir", "(", "parent_dir", ")", "if", "fn", ".", "split", "(", "\".\"", ")", "[", "0", "]", ".", "isdigit", "(", ")", "]", "+", "[", "-", "1", "]", ")" ]
Gets the number of the current experiment directory.
[ "Gets", "the", "number", "of", "the", "current", "experiment", "directory", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/experiment.py#L18-L22
train
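A sketch of how the returned number could seed the next experiment directory; the parent path is hypothetical, and the -1 default means an empty parent yields directory 0.

# Hypothetical parent directory; an empty one yields "exp/0".
import os

next_num = get_exp_dir_num("exp") + 1
new_exp_dir = os.path.join("exp", str(next_num))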
persephone-tools/persephone
persephone/experiment.py
transcribe
def transcribe(model_path, corpus):
    """ Applies a trained model to untranscribed data in a Corpus. """

    exp_dir = prep_exp_dir()
    model = get_simple_model(exp_dir, corpus)
    model.transcribe(model_path)
python
def transcribe(model_path, corpus):
    """ Applies a trained model to untranscribed data in a Corpus. """

    exp_dir = prep_exp_dir()
    model = get_simple_model(exp_dir, corpus)
    model.transcribe(model_path)
[ "def", "transcribe", "(", "model_path", ",", "corpus", ")", ":", "exp_dir", "=", "prep_exp_dir", "(", ")", "model", "=", "get_simple_model", "(", "exp_dir", ",", "corpus", ")", "model", ".", "transcribe", "(", "model_path", ")" ]
Applies a trained model to untranscribed data in a Corpus.
[ "Applies", "a", "trained", "model", "to", "untranscribed", "data", "in", "a", "Corpus", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/experiment.py#L106-L111
train
persephone-tools/persephone
persephone/preprocess/wav.py
trim_wav_ms
def trim_wav_ms(in_path: Path, out_path: Path,
                start_time: int, end_time: int) -> None:
    """ Extracts part of a WAV file.

    First attempts to call sox. If sox is unavailable, it backs off to
    pydub+ffmpeg.

    Args:
        in_path: A path to the source file to extract a portion of
        out_path: A path describing the to-be-created WAV file.
        start_time: The point in the source WAV file at which to begin
            extraction.
        end_time: The point in the source WAV file at which to end extraction.
    """

    try:
        trim_wav_sox(in_path, out_path, start_time, end_time)
    except FileNotFoundError:
        # Then sox isn't installed, so use pydub/ffmpeg
        trim_wav_pydub(in_path, out_path, start_time, end_time)
    except subprocess.CalledProcessError:
        # Then there is an issue calling sox. Perhaps the input file is an mp4
        # or some other filetype not supported out-of-the-box by sox. So we try
        # using pydub/ffmpeg.
        trim_wav_pydub(in_path, out_path, start_time, end_time)
python
def trim_wav_ms(in_path: Path, out_path: Path,
                start_time: int, end_time: int) -> None:
    """ Extracts part of a WAV file.

    First attempts to call sox. If sox is unavailable, it backs off to
    pydub+ffmpeg.

    Args:
        in_path: A path to the source file to extract a portion of
        out_path: A path describing the to-be-created WAV file.
        start_time: The point in the source WAV file at which to begin
            extraction.
        end_time: The point in the source WAV file at which to end extraction.
    """

    try:
        trim_wav_sox(in_path, out_path, start_time, end_time)
    except FileNotFoundError:
        # Then sox isn't installed, so use pydub/ffmpeg
        trim_wav_pydub(in_path, out_path, start_time, end_time)
    except subprocess.CalledProcessError:
        # Then there is an issue calling sox. Perhaps the input file is an mp4
        # or some other filetype not supported out-of-the-box by sox. So we try
        # using pydub/ffmpeg.
        trim_wav_pydub(in_path, out_path, start_time, end_time)
[ "def", "trim_wav_ms", "(", "in_path", ":", "Path", ",", "out_path", ":", "Path", ",", "start_time", ":", "int", ",", "end_time", ":", "int", ")", "->", "None", ":", "try", ":", "trim_wav_sox", "(", "in_path", ",", "out_path", ",", "start_time", ",", "end_time", ")", "except", "FileNotFoundError", ":", "# Then sox isn't installed, so use pydub/ffmpeg", "trim_wav_pydub", "(", "in_path", ",", "out_path", ",", "start_time", ",", "end_time", ")", "except", "subprocess", ".", "CalledProcessError", ":", "# Then there is an issue calling sox. Perhaps the input file is an mp4", "# or some other filetype not supported out-of-the-box by sox. So we try", "# using pydub/ffmpeg.", "trim_wav_pydub", "(", "in_path", ",", "out_path", ",", "start_time", ",", "end_time", ")" ]
Extracts part of a WAV file.

First attempts to call sox. If sox is unavailable, it backs off to
pydub+ffmpeg.

Args:
    in_path: A path to the source file to extract a portion of
    out_path: A path describing the to-be-created WAV file.
    start_time: The point in the source WAV file at which to begin
        extraction.
    end_time: The point in the source WAV file at which to end extraction.
[ "Extracts", "part", "of", "a", "WAV", "File", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/wav.py#L18-L43
train
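A usage sketch of the sox-then-pydub fallback; the paths are hypothetical and the offsets are in milliseconds.

# Hypothetical paths; extract the span from 1.5 s to 3.2 s.
from pathlib import Path

trim_wav_ms(Path("full_recording.wav"), Path("utterance1.wav"), 1500, 3200)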
persephone-tools/persephone
persephone/preprocess/wav.py
trim_wav_pydub
def trim_wav_pydub(in_path: Path, out_path: Path,
                   start_time: int, end_time: int) -> None:
    """ Crops the wav file. """

    logger.info(
        "Using pydub/ffmpeg to create {} from {}".format(out_path, in_path) +
        " using a start_time of {} and an end_time of {}".format(
            start_time, end_time))

    if out_path.is_file():
        return

    # TODO add logging here
    #print("in_fn: {}".format(in_fn))
    #print("out_fn: {}".format(out_fn))
    in_ext = in_path.suffix[1:]
    out_ext = out_path.suffix[1:]
    audio = AudioSegment.from_file(str(in_path), in_ext)
    trimmed = audio[start_time:end_time]
    # pydub evidently doesn't actually use the parameters when outputting wavs,
    # since it doesn't use FFMPEG to deal with outputting WAVs. This is a bit
    # of a leaky abstraction. No warning is given, so normalization to 16kHz
    # mono wavs has to happen later. Leaving the parameters here in case it
    # changes
    trimmed.export(str(out_path), format=out_ext,
                   parameters=["-ac", "1", "-ar", "16000"])
python
def trim_wav_pydub(in_path: Path, out_path: Path,
                   start_time: int, end_time: int) -> None:
    """ Crops the wav file. """

    logger.info(
        "Using pydub/ffmpeg to create {} from {}".format(out_path, in_path) +
        " using a start_time of {} and an end_time of {}".format(
            start_time, end_time))

    if out_path.is_file():
        return

    # TODO add logging here
    #print("in_fn: {}".format(in_fn))
    #print("out_fn: {}".format(out_fn))
    in_ext = in_path.suffix[1:]
    out_ext = out_path.suffix[1:]
    audio = AudioSegment.from_file(str(in_path), in_ext)
    trimmed = audio[start_time:end_time]
    # pydub evidently doesn't actually use the parameters when outputting wavs,
    # since it doesn't use FFMPEG to deal with outputting WAVs. This is a bit
    # of a leaky abstraction. No warning is given, so normalization to 16kHz
    # mono wavs has to happen later. Leaving the parameters here in case it
    # changes
    trimmed.export(str(out_path), format=out_ext,
                   parameters=["-ac", "1", "-ar", "16000"])
[ "def", "trim_wav_pydub", "(", "in_path", ":", "Path", ",", "out_path", ":", "Path", ",", "start_time", ":", "int", ",", "end_time", ":", "int", ")", "->", "None", ":", "logger", ".", "info", "(", "\"Using pydub/ffmpeg to create {} from {}\"", ".", "format", "(", "out_path", ",", "in_path", ")", "+", "\" using a start_time of {} and an end_time of {}\"", ".", "format", "(", "start_time", ",", "end_time", ")", ")", "if", "out_path", ".", "is_file", "(", ")", ":", "return", "# TODO add logging here", "#print(\"in_fn: {}\".format(in_fn))", "#print(\"out_fn: {}\".format(out_fn))", "in_ext", "=", "in_path", ".", "suffix", "[", "1", ":", "]", "out_ext", "=", "out_path", ".", "suffix", "[", "1", ":", "]", "audio", "=", "AudioSegment", ".", "from_file", "(", "str", "(", "in_path", ")", ",", "in_ext", ")", "trimmed", "=", "audio", "[", "start_time", ":", "end_time", "]", "# pydub evidently doesn't actually use the parameters when outputting wavs,", "# since it doesn't use FFMPEG to deal with outputtting WAVs. This is a bit", "# of a leaky abstraction. No warning is given, so normalization to 16Khz", "# mono wavs has to happen later. Leaving the parameters here in case it", "# changes", "trimmed", ".", "export", "(", "str", "(", "out_path", ")", ",", "format", "=", "out_ext", ",", "parameters", "=", "[", "\"-ac\"", ",", "\"1\"", ",", "\"-ar\"", ",", "\"16000\"", "]", ")" ]
Crops the wav file.
[ "Crops", "the", "wav", "file", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/wav.py#L45-L70
train
persephone-tools/persephone
persephone/preprocess/wav.py
trim_wav_sox
def trim_wav_sox(in_path: Path, out_path: Path,
                 start_time: int, end_time: int) -> None:
    """ Crops the wav file at in_fn so that the audio between start_time and
    end_time is output to out_fn. Measured in milliseconds.
    """

    if out_path.is_file():
        logger.info("Output path %s already exists, not trimming file", out_path)
        return

    start_time_secs = millisecs_to_secs(start_time)
    end_time_secs = millisecs_to_secs(end_time)
    args = [config.SOX_PATH, str(in_path), str(out_path),
            "trim", str(start_time_secs), "=" + str(end_time_secs)]
    logger.info("Cropping file %s, from start time %d (seconds) to end time %d (seconds), outputting to %s",
                in_path, start_time_secs, end_time_secs, out_path)
    subprocess.run(args, check=True)
python
def trim_wav_sox(in_path: Path, out_path: Path, start_time: int, end_time: int) -> None: """ Crops the wav file at in_path so that the audio between start_time and end_time is output to out_path. Times are measured in milliseconds. """ if out_path.is_file(): logger.info("Output path %s already exists, not trimming file", out_path) return start_time_secs = millisecs_to_secs(start_time) end_time_secs = millisecs_to_secs(end_time) args = [config.SOX_PATH, str(in_path), str(out_path), "trim", str(start_time_secs), "=" + str(end_time_secs)] logger.info("Cropping file %s, from start time %d (seconds) to end time %d (seconds), outputting to %s", in_path, start_time_secs, end_time_secs, out_path) subprocess.run(args, check=True)
[ "def", "trim_wav_sox", "(", "in_path", ":", "Path", ",", "out_path", ":", "Path", ",", "start_time", ":", "int", ",", "end_time", ":", "int", ")", "->", "None", ":", "if", "out_path", ".", "is_file", "(", ")", ":", "logger", ".", "info", "(", "\"Output path %s already exists, not trimming file\"", ",", "out_path", ")", "return", "start_time_secs", "=", "millisecs_to_secs", "(", "start_time", ")", "end_time_secs", "=", "millisecs_to_secs", "(", "end_time", ")", "args", "=", "[", "config", ".", "SOX_PATH", ",", "str", "(", "in_path", ")", ",", "str", "(", "out_path", ")", ",", "\"trim\"", ",", "str", "(", "start_time_secs", ")", ",", "\"=\"", "+", "str", "(", "end_time_secs", ")", "]", "logger", ".", "info", "(", "\"Cropping file %s, from start time %d (seconds) to end time %d (seconds), outputting to %s\"", ",", "in_path", ",", "start_time_secs", ",", "end_time_secs", ",", "out_path", ")", "subprocess", ".", "run", "(", "args", ",", "check", "=", "True", ")" ]
Crops the wav file at in_path so that the audio between start_time and end_time is output to out_path. Times are measured in milliseconds.
[ "Crops", "the", "wav", "file", "at", "in_path", "so", "that", "the", "audio", "between", "start_time", "and", "end_time", "is", "output", "to", "out_path", ".", "Times", "are", "measured", "in", "milliseconds", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/wav.py#L72-L88
train
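The sox-based variant has the same signature. A sketch, assuming SoX is installed and config.SOX_PATH points at the binary; the file names are again hypothetical:

from pathlib import Path
from persephone.preprocess.wav import trim_wav_sox

# Millisecond offsets are converted to seconds before sox's "trim" effect is
# invoked, and the call is skipped if the output file already exists.
trim_wav_sox(Path("recording.wav"), Path("utterance.0.wav"),
             start_time=1500, end_time=4000)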
persephone-tools/persephone
persephone/preprocess/wav.py
extract_wavs
def extract_wavs(utterances: List[Utterance], tgt_dir: Path, lazy: bool) -> None: """ Extracts WAVs from the media files associated with a list of Utterance objects and stores them in a target directory. Args: utterances: A list of Utterance objects, which include information about the source media file, and the offset of the utterance in the media_file. tgt_dir: The directory in which to write the output WAVs. lazy: If True, then existing WAVs will not be overwritten if they have the same name. """ tgt_dir.mkdir(parents=True, exist_ok=True) for utter in utterances: wav_fn = "{}.{}".format(utter.prefix, "wav") out_wav_path = tgt_dir / wav_fn if lazy and out_wav_path.is_file(): logger.info("File {} already exists and lazy == {}; not " \ "writing.".format(out_wav_path, lazy)) continue logger.info("File {} does not exist and lazy == {}; creating " \ "it.".format(out_wav_path, lazy)) trim_wav_ms(utter.org_media_path, out_wav_path, utter.start_time, utter.end_time)
python
def extract_wavs(utterances: List[Utterance], tgt_dir: Path, lazy: bool) -> None: """ Extracts WAVs from the media files associated with a list of Utterance objects and stores them in a target directory. Args: utterances: A list of Utterance objects, which include information about the source media file, and the offset of the utterance in the media_file. tgt_dir: The directory in which to write the output WAVs. lazy: If True, then existing WAVs will not be overwritten if they have the same name. """ tgt_dir.mkdir(parents=True, exist_ok=True) for utter in utterances: wav_fn = "{}.{}".format(utter.prefix, "wav") out_wav_path = tgt_dir / wav_fn if lazy and out_wav_path.is_file(): logger.info("File {} already exists and lazy == {}; not " \ "writing.".format(out_wav_path, lazy)) continue logger.info("File {} does not exist and lazy == {}; creating " \ "it.".format(out_wav_path, lazy)) trim_wav_ms(utter.org_media_path, out_wav_path, utter.start_time, utter.end_time)
[ "def", "extract_wavs", "(", "utterances", ":", "List", "[", "Utterance", "]", ",", "tgt_dir", ":", "Path", ",", "lazy", ":", "bool", ")", "->", "None", ":", "tgt_dir", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "for", "utter", "in", "utterances", ":", "wav_fn", "=", "\"{}.{}\"", ".", "format", "(", "utter", ".", "prefix", ",", "\"wav\"", ")", "out_wav_path", "=", "tgt_dir", "/", "wav_fn", "if", "lazy", "and", "out_wav_path", ".", "is_file", "(", ")", ":", "logger", ".", "info", "(", "\"File {} already exists and lazy == {}; not \"", "\"writing.\"", ".", "format", "(", "out_wav_path", ",", "lazy", ")", ")", "continue", "logger", ".", "info", "(", "\"File {} does not exist and lazy == {}; creating \"", "\"it.\"", ".", "format", "(", "out_wav_path", ",", "lazy", ")", ")", "trim_wav_ms", "(", "utter", ".", "org_media_path", ",", "out_wav_path", ",", "utter", ".", "start_time", ",", "utter", ".", "end_time", ")" ]
Extracts WAVs from the media files associated with a list of Utterance objects and stores them in a target directory. Args: utterances: A list of Utterance objects, which include information about the source media file, and the offset of the utterance in the media_file. tgt_dir: The directory in which to write the output WAVs. lazy: If True, then existing WAVs will not be overwritten if they have the same name.
[ "Extracts", "WAVs", "from", "the", "media", "files", "associated", "with", "a", "list", "of", "Utterance", "objects", "and", "stores", "them", "in", "a", "target", "directory", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/wav.py#L90-L114
train
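A sketch of driving extract_wavs. The namedtuple below is a stand-in exposing just the attributes the function reads (org_media_path, prefix, start_time, end_time); the real Utterance type is defined elsewhere in the package:

from collections import namedtuple
from pathlib import Path
from persephone.preprocess.wav import extract_wavs

Utt = namedtuple("Utt", ["org_media_path", "prefix", "start_time", "end_time"])
utterances = [Utt(Path("recording.wav"), "recording.0", 1500, 4000)]

# With lazy=True, WAVs whose target path already exists are left untouched.
extract_wavs(utterances, tgt_dir=Path("wav/"), lazy=True)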
persephone-tools/persephone
persephone/results.py
filter_labels
def filter_labels(sent: Sequence[str], labels: Set[str] = None) -> List[str]: """ Returns only the tokens present in the sentence that are in labels.""" if labels: return [tok for tok in sent if tok in labels] return list(sent)
python
def filter_labels(sent: Sequence[str], labels: Set[str] = None) -> List[str]: """ Returns only the tokens present in the sentence that are in labels.""" if labels: return [tok for tok in sent if tok in labels] return list(sent)
[ "def", "filter_labels", "(", "sent", ":", "Sequence", "[", "str", "]", ",", "labels", ":", "Set", "[", "str", "]", "=", "None", ")", "->", "List", "[", "str", "]", ":", "if", "labels", ":", "return", "[", "tok", "for", "tok", "in", "sent", "if", "tok", "in", "labels", "]", "return", "list", "(", "sent", ")" ]
Returns only the tokens present in the sentence that are in labels.
[ "Returns", "only", "the", "tokens", "present", "in", "the", "sentence", "that", "are", "in", "labels", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/results.py#L11-L16
train
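filter_labels is a pure function, so its behaviour is easy to pin down with a quick example:

from persephone.results import filter_labels

sent = ["a", "b", "c", "b"]
print(filter_labels(sent, labels={"a", "b"}))  # ['a', 'b', 'b']
print(filter_labels(sent))                     # no label set: ['a', 'b', 'c', 'b']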
persephone-tools/persephone
persephone/results.py
filtered_error_rate
def filtered_error_rate(hyps_path: Union[str, Path], refs_path: Union[str, Path], labels: Set[str]) -> float: """ Returns the error rate of hypotheses in hyps_path against references in refs_path after filtering only for labels in labels. """ if isinstance(hyps_path, Path): hyps_path = str(hyps_path) if isinstance(refs_path, Path): refs_path = str(refs_path) with open(hyps_path) as hyps_f: lines = hyps_f.readlines() hyps = [filter_labels(line.split(), labels) for line in lines] with open(refs_path) as refs_f: lines = refs_f.readlines() refs = [filter_labels(line.split(), labels) for line in lines] # For the case where there are no tokens left after filtering. only_empty = True for entry in hyps: if entry: only_empty = False break # found something so can move on immediately if only_empty: return -1 return utils.batch_per(hyps, refs)
python
def filtered_error_rate(hyps_path: Union[str, Path], refs_path: Union[str, Path], labels: Set[str]) -> float: """ Returns the error rate of hypotheses in hyps_path against references in refs_path after filtering only for labels in labels. """ if isinstance(hyps_path, Path): hyps_path = str(hyps_path) if isinstance(refs_path, Path): refs_path = str(refs_path) with open(hyps_path) as hyps_f: lines = hyps_f.readlines() hyps = [filter_labels(line.split(), labels) for line in lines] with open(refs_path) as refs_f: lines = refs_f.readlines() refs = [filter_labels(line.split(), labels) for line in lines] # For the case where there are no tokens left after filtering. only_empty = True for entry in hyps: if entry: only_empty = False break # found something so can move on immediately if only_empty: return -1 return utils.batch_per(hyps, refs)
[ "def", "filtered_error_rate", "(", "hyps_path", ":", "Union", "[", "str", ",", "Path", "]", ",", "refs_path", ":", "Union", "[", "str", ",", "Path", "]", ",", "labels", ":", "Set", "[", "str", "]", ")", "->", "float", ":", "if", "isinstance", "(", "hyps_path", ",", "Path", ")", ":", "hyps_path", "=", "str", "(", "hyps_path", ")", "if", "isinstance", "(", "refs_path", ",", "Path", ")", ":", "refs_path", "=", "str", "(", "refs_path", ")", "with", "open", "(", "hyps_path", ")", "as", "hyps_f", ":", "lines", "=", "hyps_f", ".", "readlines", "(", ")", "hyps", "=", "[", "filter_labels", "(", "line", ".", "split", "(", ")", ",", "labels", ")", "for", "line", "in", "lines", "]", "with", "open", "(", "refs_path", ")", "as", "refs_f", ":", "lines", "=", "refs_f", ".", "readlines", "(", ")", "refs", "=", "[", "filter_labels", "(", "line", ".", "split", "(", ")", ",", "labels", ")", "for", "line", "in", "lines", "]", "# For the case where there are no tokens left after filtering.", "only_empty", "=", "True", "for", "entry", "in", "hyps", ":", "if", "entry", "is", "not", "[", "]", ":", "only_empty", "=", "False", "break", "# found something so can move on immediately", "if", "only_empty", ":", "return", "-", "1", "return", "utils", ".", "batch_per", "(", "hyps", ",", "refs", ")" ]
Returns the error rate of hypotheses in hyps_path against references in refs_path after filtering only for labels in labels.
[ "Returns", "the", "error", "rate", "of", "hypotheses", "in", "hyps_path", "against", "references", "in", "refs_path", "after", "filtering", "only", "for", "labels", "in", "labels", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/results.py#L18-L42
train
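A usage sketch, assuming two line-aligned files with one space-delimited transcription per line; the paths are hypothetical:

from persephone.results import filtered_error_rate

# Returns -1 if filtering leaves no hypothesis tokens at all; otherwise the
# batch error rate over the filtered hypothesis/reference pairs.
per = filtered_error_rate("exp/hyps.txt", "exp/refs.txt", labels={"a", "b", "c"})
print("filtered error rate: {:.3f}".format(per))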
persephone-tools/persephone
persephone/results.py
fmt_latex_output
def fmt_latex_output(hyps: Sequence[Sequence[str]], refs: Sequence[Sequence[str]], prefixes: Sequence[str], out_fn: Path, ) -> None: """ Output the hypotheses and references to a LaTeX source file for pretty printing. """ alignments_ = [min_edit_distance_align(ref, hyp) for hyp, ref in zip(hyps, refs)] with out_fn.open("w") as out_f: print(latex_header(), file=out_f) print("\\begin{document}\n" "\\begin{longtable}{ll}", file=out_f) print(r"\toprule", file=out_f) for sent in zip(prefixes, alignments_): prefix = sent[0] alignments = sent[1:] print("Utterance ID: &", prefix.strip().replace(r"_", r"\_"), r"\\", file=out_f) for i, alignment in enumerate(alignments): ref_list = [] hyp_list = [] for arrow in alignment: if arrow[0] == arrow[1]: # Then don't highlight it; it's correct. ref_list.append(arrow[0]) hyp_list.append(arrow[1]) else: # Then highlight the errors. ref_list.append("\\hl{%s}" % arrow[0]) hyp_list.append("\\hl{%s}" % arrow[1]) print("Ref: &", "".join(ref_list), r"\\", file=out_f) print("Hyp: &", "".join(hyp_list), r"\\", file=out_f) print(r"\midrule", file=out_f) print(r"\end{longtable}", file=out_f) print(r"\end{document}", file=out_f)
python
def fmt_latex_output(hyps: Sequence[Sequence[str]], refs: Sequence[Sequence[str]], prefixes: Sequence[str], out_fn: Path, ) -> None: """ Output the hypotheses and references to a LaTeX source file for pretty printing. """ alignments_ = [min_edit_distance_align(ref, hyp) for hyp, ref in zip(hyps, refs)] with out_fn.open("w") as out_f: print(latex_header(), file=out_f) print("\\begin{document}\n" "\\begin{longtable}{ll}", file=out_f) print(r"\toprule", file=out_f) for sent in zip(prefixes, alignments_): prefix = sent[0] alignments = sent[1:] print("Utterance ID: &", prefix.strip().replace(r"_", r"\_"), r"\\", file=out_f) for i, alignment in enumerate(alignments): ref_list = [] hyp_list = [] for arrow in alignment: if arrow[0] == arrow[1]: # Then don't highlight it; it's correct. ref_list.append(arrow[0]) hyp_list.append(arrow[1]) else: # Then highlight the errors. ref_list.append("\\hl{%s}" % arrow[0]) hyp_list.append("\\hl{%s}" % arrow[1]) print("Ref: &", "".join(ref_list), r"\\", file=out_f) print("Hyp: &", "".join(hyp_list), r"\\", file=out_f) print(r"\midrule", file=out_f) print(r"\end{longtable}", file=out_f) print(r"\end{document}", file=out_f)
[ "def", "fmt_latex_output", "(", "hyps", ":", "Sequence", "[", "Sequence", "[", "str", "]", "]", ",", "refs", ":", "Sequence", "[", "Sequence", "[", "str", "]", "]", ",", "prefixes", ":", "Sequence", "[", "str", "]", ",", "out_fn", ":", "Path", ",", ")", "->", "None", ":", "alignments_", "=", "[", "min_edit_distance_align", "(", "ref", ",", "hyp", ")", "for", "hyp", ",", "ref", "in", "zip", "(", "hyps", ",", "refs", ")", "]", "with", "out_fn", ".", "open", "(", "\"w\"", ")", "as", "out_f", ":", "print", "(", "latex_header", "(", ")", ",", "file", "=", "out_f", ")", "print", "(", "\"\\\\begin{document}\\n\"", "\"\\\\begin{longtable}{ll}\"", ",", "file", "=", "out_f", ")", "print", "(", "r\"\\toprule\"", ",", "file", "=", "out_f", ")", "for", "sent", "in", "zip", "(", "prefixes", ",", "alignments_", ")", ":", "prefix", "=", "sent", "[", "0", "]", "alignments", "=", "sent", "[", "1", ":", "]", "print", "(", "\"Utterance ID: &\"", ",", "prefix", ".", "strip", "(", ")", ".", "replace", "(", "r\"_\"", ",", "r\"\\_\"", ")", ",", "r\"\\\\\"", ",", "file", "=", "out_f", ")", "for", "i", ",", "alignment", "in", "enumerate", "(", "alignments", ")", ":", "ref_list", "=", "[", "]", "hyp_list", "=", "[", "]", "for", "arrow", "in", "alignment", ":", "if", "arrow", "[", "0", "]", "==", "arrow", "[", "1", "]", ":", "# Then don't highlight it; it's correct.", "ref_list", ".", "append", "(", "arrow", "[", "0", "]", ")", "hyp_list", ".", "append", "(", "arrow", "[", "1", "]", ")", "else", ":", "# Then highlight the errors.", "ref_list", ".", "append", "(", "\"\\\\hl{%s}\"", "%", "arrow", "[", "0", "]", ")", "hyp_list", ".", "append", "(", "\"\\\\hl{%s}\"", "%", "arrow", "[", "1", "]", ")", "print", "(", "\"Ref: &\"", ",", "\"\"", ".", "join", "(", "ref_list", ")", ",", "r\"\\\\\"", ",", "file", "=", "out_f", ")", "print", "(", "\"Hyp: &\"", ",", "\"\"", ".", "join", "(", "hyp_list", ")", ",", "r\"\\\\\"", ",", "file", "=", "out_f", ")", "print", "(", "r\"\\midrule\"", ",", "file", "=", "out_f", ")", "print", "(", "r\"\\end{longtable}\"", ",", "file", "=", "out_f", ")", "print", "(", "r\"\\end{document}\"", ",", "file", "=", "out_f", ")" ]
Output the hypotheses and references to a LaTeX source file for pretty printing.
[ "Output", "the", "hypotheses", "and", "references", "to", "a", "LaTeX", "source", "file", "for", "pretty", "printing", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/results.py#L57-L96
train
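A sketch of producing an alignment report; the token sequences, prefixes and output path are illustrative:

from pathlib import Path
from persephone.results import fmt_latex_output

hyps = [["a", "b"], ["c"]]
refs = [["a", "c"], ["c"]]
# Writes a standalone LaTeX document with one reference/hypothesis pair per
# utterance, highlighting edits found by minimum-edit-distance alignment.
fmt_latex_output(hyps, refs, prefixes=["story.0", "story.1"],
                 out_fn=Path("alignments.tex"))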
persephone-tools/persephone
persephone/results.py
fmt_confusion_matrix
def fmt_confusion_matrix(hyps: Sequence[Sequence[str]], refs: Sequence[Sequence[str]], label_set: Set[str] = None, max_width: int = 25) -> str: """ Formats a confusion matrix over substitutions, ignoring insertions and deletions. """ if not label_set: # Then determine the label set by reading raise NotImplementedError() alignments = [min_edit_distance_align(ref, hyp) for hyp, ref in zip(hyps, refs)] arrow_counter = Counter() # type: Dict[Tuple[str, str], int] for alignment in alignments: arrow_counter.update(alignment) ref_total = Counter() # type: Dict[str, int] for alignment in alignments: ref_total.update([arrow[0] for arrow in alignment]) labels = [label for label, count in sorted(ref_total.items(), key=lambda x: x[1], reverse=True) if label != ""][:max_width] format_pieces = [] fmt = "{:3} "*(len(labels)+1) format_pieces.append(fmt.format(" ", *labels)) fmt = "{:3} " + ("{:<3} " * (len(labels))) for ref in labels: # TODO ref_results = [arrow_counter[(ref, hyp)] for hyp in labels] format_pieces.append(fmt.format(ref, *ref_results)) return "\n".join(format_pieces)
python
def fmt_confusion_matrix(hyps: Sequence[Sequence[str]], refs: Sequence[Sequence[str]], label_set: Set[str] = None, max_width: int = 25) -> str: """ Formats a confusion matrix over substitutions, ignoring insertions and deletions. """ if not label_set: # Then determine the label set by reading raise NotImplementedError() alignments = [min_edit_distance_align(ref, hyp) for hyp, ref in zip(hyps, refs)] arrow_counter = Counter() # type: Dict[Tuple[str, str], int] for alignment in alignments: arrow_counter.update(alignment) ref_total = Counter() # type: Dict[str, int] for alignment in alignments: ref_total.update([arrow[0] for arrow in alignment]) labels = [label for label, count in sorted(ref_total.items(), key=lambda x: x[1], reverse=True) if label != ""][:max_width] format_pieces = [] fmt = "{:3} "*(len(labels)+1) format_pieces.append(fmt.format(" ", *labels)) fmt = "{:3} " + ("{:<3} " * (len(labels))) for ref in labels: # TODO ref_results = [arrow_counter[(ref, hyp)] for hyp in labels] format_pieces.append(fmt.format(ref, *ref_results)) return "\n".join(format_pieces)
[ "def", "fmt_confusion_matrix", "(", "hyps", ":", "Sequence", "[", "Sequence", "[", "str", "]", "]", ",", "refs", ":", "Sequence", "[", "Sequence", "[", "str", "]", "]", ",", "label_set", ":", "Set", "[", "str", "]", "=", "None", ",", "max_width", ":", "int", "=", "25", ")", "->", "str", ":", "if", "not", "label_set", ":", "# Then determine the label set by reading", "raise", "NotImplementedError", "(", ")", "alignments", "=", "[", "min_edit_distance_align", "(", "ref", ",", "hyp", ")", "for", "hyp", ",", "ref", "in", "zip", "(", "hyps", ",", "refs", ")", "]", "arrow_counter", "=", "Counter", "(", ")", "# type: Dict[Tuple[str, str], int]", "for", "alignment", "in", "alignments", ":", "arrow_counter", ".", "update", "(", "alignment", ")", "ref_total", "=", "Counter", "(", ")", "# type: Dict[str, int]", "for", "alignment", "in", "alignments", ":", "ref_total", ".", "update", "(", "[", "arrow", "[", "0", "]", "for", "arrow", "in", "alignment", "]", ")", "labels", "=", "[", "label", "for", "label", ",", "count", "in", "sorted", "(", "ref_total", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ",", "reverse", "=", "True", ")", "if", "label", "!=", "\"\"", "]", "[", ":", "max_width", "]", "format_pieces", "=", "[", "]", "fmt", "=", "\"{:3} \"", "*", "(", "len", "(", "labels", ")", "+", "1", ")", "format_pieces", ".", "append", "(", "fmt", ".", "format", "(", "\" \"", ",", "*", "labels", ")", ")", "fmt", "=", "\"{:3} \"", "+", "(", "\"{:<3} \"", "*", "(", "len", "(", "labels", ")", ")", ")", "for", "ref", "in", "labels", ":", "# TODO", "ref_results", "=", "[", "arrow_counter", "[", "(", "ref", ",", "hyp", ")", "]", "for", "hyp", "in", "labels", "]", "format_pieces", ".", "append", "(", "fmt", ".", "format", "(", "ref", ",", "*", "ref_results", ")", ")", "return", "\"\\n\"", ".", "join", "(", "format_pieces", ")" ]
Formats a confusion matrix over substitutions, ignoring insertions and deletions.
[ "Formats", "a", "confusion", "matrix", "over", "substitutions", "ignoring", "insertions", "and", "deletions", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/results.py#L132-L167
train
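A small example of the substitution matrix. A label_set must currently be supplied, since the label-discovery branch raises NotImplementedError:

from persephone.results import fmt_confusion_matrix

hyps = [["a", "b", "b"], ["a", "a"]]
refs = [["a", "b", "c"], ["a", "b"]]
# Rows are reference labels ordered by frequency; columns are the counts of
# each hypothesis label aligned against that reference label.
print(fmt_confusion_matrix(hyps, refs, label_set={"a", "b", "c"}))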
persephone-tools/persephone
persephone/results.py
fmt_latex_untranscribed
def fmt_latex_untranscribed(hyps: Sequence[Sequence[str]], prefixes: Sequence[str], out_fn: Path) -> None: """ Formats automatic hypotheses that have not previously been transcribed in LaTeX. """ hyps_prefixes = list(zip(hyps, prefixes)) def utter_id_key(hyp_prefix): hyp, prefix = hyp_prefix prefix_split = prefix.split(".") return (prefix_split[0], int(prefix_split[1])) hyps_prefixes.sort(key=utter_id_key) with out_fn.open("w") as out_f: print(latex_header(), file=out_f) print("\\begin{document}\n" "\\begin{longtable}{ll}", file=out_f) print(r"\toprule", file=out_f) for hyp, prefix in hyps_prefixes: print("Utterance ID: &", prefix.strip().replace(r"_", r"\_"), "\\\\", file=out_f) print("Hypothesis: &", hyp, r"\\", file=out_f) print("\\midrule", file=out_f) print(r"\end{longtable}", file=out_f) print(r"\end{document}", file=out_f)
python
def fmt_latex_untranscribed(hyps: Sequence[Sequence[str]], prefixes: Sequence[str], out_fn: Path) -> None: """ Formats automatic hypotheses that have not previously been transcribed in LaTeX. """ hyps_prefixes = list(zip(hyps, prefixes)) def utter_id_key(hyp_prefix): hyp, prefix = hyp_prefix prefix_split = prefix.split(".") return (prefix_split[0], int(prefix_split[1])) hyps_prefixes.sort(key=utter_id_key) with out_fn.open("w") as out_f: print(latex_header(), file=out_f) print("\\begin{document}\n" "\\begin{longtable}{ll}", file=out_f) print(r"\toprule", file=out_f) for hyp, prefix in hyps_prefixes: print("Utterance ID: &", prefix.strip().replace(r"_", r"\_"), "\\\\", file=out_f) print("Hypothesis: &", hyp, r"\\", file=out_f) print("\\midrule", file=out_f) print(r"\end{longtable}", file=out_f) print(r"\end{document}", file=out_f)
[ "def", "fmt_latex_untranscribed", "(", "hyps", ":", "Sequence", "[", "Sequence", "[", "str", "]", "]", ",", "prefixes", ":", "Sequence", "[", "str", "]", ",", "out_fn", ":", "Path", ")", "->", "None", ":", "hyps_prefixes", "=", "list", "(", "zip", "(", "hyps", ",", "prefixes", ")", ")", "def", "utter_id_key", "(", "hyp_prefix", ")", ":", "hyp", ",", "prefix", "=", "hyp_prefix", "prefix_split", "=", "prefix", ".", "split", "(", "\".\"", ")", "return", "(", "prefix_split", "[", "0", "]", ",", "int", "(", "prefix_split", "[", "1", "]", ")", ")", "hyps_prefixes", ".", "sort", "(", "key", "=", "utter_id_key", ")", "with", "out_fn", ".", "open", "(", "\"w\"", ")", "as", "out_f", ":", "print", "(", "latex_header", "(", ")", ",", "file", "=", "out_f", ")", "print", "(", "\"\\\\begin{document}\\n\"", "\"\\\\begin{longtable}{ll}\"", ",", "file", "=", "out_f", ")", "print", "(", "r\"\\toprule\"", ",", "file", "=", "out_f", ")", "for", "hyp", ",", "prefix", "in", "hyps_prefixes", ":", "print", "(", "\"Utterance ID: &\"", ",", "prefix", ".", "strip", "(", ")", ".", "replace", "(", "r\"_\"", ",", "r\"\\_\"", ")", ",", "\"\\\\\\\\\"", ",", "file", "=", "out_f", ")", "print", "(", "\"Hypothesis: &\"", ",", "hyp", ",", "r\"\\\\\"", ",", "file", "=", "out_f", ")", "print", "(", "\"\\\\midrule\"", ",", "file", "=", "out_f", ")", "print", "(", "r\"\\end{longtable}\"", ",", "file", "=", "out_f", ")", "print", "(", "r\"\\end{document}\"", ",", "file", "=", "out_f", ")" ]
Formats automatic hypotheses that have not previously been transcribed in LaTeX.
[ "Formats", "automatic", "hypotheses", "that", "have", "not", "previously", "been", "transcribed", "in", "LaTeX", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/results.py#L169-L192
train
persephone-tools/persephone
persephone/preprocess/labels.py
segment_into_chars
def segment_into_chars(utterance: str) -> str: """ Segments an utterance into space delimited characters. """ if not isinstance(utterance, str): raise TypeError("Input type must be a string. Got {}.".format(type(utterance))) utterance = utterance.strip() utterance = utterance.replace(" ", "") return " ".join(utterance)
python
def segment_into_chars(utterance: str) -> str: """ Segments an utterance into space delimited characters. """ if not isinstance(utterance, str): raise TypeError("Input type must be a string. Got {}.".format(type(utterance))) utterance = utterance.strip() utterance = utterance.replace(" ", "") return " ".join(utterance)
[ "def", "segment_into_chars", "(", "utterance", ":", "str", ")", "->", "str", ":", "if", "not", "isinstance", "(", "utterance", ",", "str", ")", ":", "raise", "TypeError", "(", "\"Input type must be a string. Got {}.\"", ".", "format", "(", "type", "(", "utterance", ")", ")", ")", "utterance", ".", "strip", "(", ")", "utterance", "=", "utterance", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "return", "\" \"", ".", "join", "(", "utterance", ")" ]
Segments an utterance into space delimited characters.
[ "Segments", "an", "utterance", "into", "space", "delimited", "characters", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/labels.py#L28-L36
train
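For example:

from persephone.preprocess.labels import segment_into_chars

# Existing spaces are removed, then one space is inserted per character.
print(segment_into_chars("ba ma"))  # -> "b a m a"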
persephone-tools/persephone
persephone/preprocess/labels.py
make_indices_to_labels
def make_indices_to_labels(labels: Set[str]) -> Dict[int, str]: """ Creates a mapping from indices to labels. """ return {index: label for index, label in enumerate(["pad"] + sorted(list(labels)))}
python
def make_indices_to_labels(labels: Set[str]) -> Dict[int, str]: """ Creates a mapping from indices to labels. """ return {index: label for index, label in enumerate(["pad"] + sorted(list(labels)))}
[ "def", "make_indices_to_labels", "(", "labels", ":", "Set", "[", "str", "]", ")", "->", "Dict", "[", "int", ",", "str", "]", ":", "return", "{", "index", ":", "label", "for", "index", ",", "label", "in", "enumerate", "(", "[", "\"pad\"", "]", "+", "sorted", "(", "list", "(", "labels", ")", ")", ")", "}" ]
Creates a mapping from indices to labels.
[ "Creates", "a", "mapping", "from", "indices", "to", "labels", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/labels.py#L81-L85
train
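Index 0 is always reserved for the padding symbol and the remaining labels are sorted, for example:

from persephone.preprocess.labels import make_indices_to_labels

print(make_indices_to_labels({"b", "a"}))  # {0: 'pad', 1: 'a', 2: 'b'}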
persephone-tools/persephone
persephone/datasets/na.py
preprocess_french
def preprocess_french(trans, fr_nlp, remove_brackets_content=True): """ Takes a French transcription string and preprocesses it.""" if remove_brackets_content: trans = pangloss.remove_content_in_brackets(trans, "[]") # Not sure why I have to split and rejoin, but that fixes a Spacy token # error. trans = fr_nlp(" ".join(trans.split()[:])) #trans = fr_nlp(trans) trans = " ".join([token.lower_ for token in trans if not token.is_punct]) return trans
python
def preprocess_french(trans, fr_nlp, remove_brackets_content=True): """ Takes a French transcription string and preprocesses it.""" if remove_brackets_content: trans = pangloss.remove_content_in_brackets(trans, "[]") # Not sure why I have to split and rejoin, but that fixes a Spacy token # error. trans = fr_nlp(" ".join(trans.split()[:])) #trans = fr_nlp(trans) trans = " ".join([token.lower_ for token in trans if not token.is_punct]) return trans
[ "def", "preprocess_french", "(", "trans", ",", "fr_nlp", ",", "remove_brackets_content", "=", "True", ")", ":", "if", "remove_brackets_content", ":", "trans", "=", "pangloss", ".", "remove_content_in_brackets", "(", "trans", ",", "\"[]\"", ")", "# Not sure why I have to split and rejoin, but that fixes a Spacy token", "# error.", "trans", "=", "fr_nlp", "(", "\" \"", ".", "join", "(", "trans", ".", "split", "(", ")", "[", ":", "]", ")", ")", "#trans = fr_nlp(trans)", "trans", "=", "\" \"", ".", "join", "(", "[", "token", ".", "lower_", "for", "token", "in", "trans", "if", "not", "token", ".", "is_punct", "]", ")", "return", "trans" ]
Takes a French transcription string and preprocesses it.
[ "Takes", "a", "French", "transcription", "string", "and", "preprocesses", "it", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L209-L220
train
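A usage sketch; it needs a French spaCy pipeline, and the model name below is an assumption about what is installed locally:

import spacy
from persephone.datasets.na import preprocess_french

fr_nlp = spacy.load("fr_core_news_sm")  # hypothetical model choice
# Bracketed content is stripped, tokens are lowercased, punctuation dropped.
print(preprocess_french("Bonjour, le monde [rires] !", fr_nlp))  # "bonjour le monde"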
persephone-tools/persephone
persephone/datasets/na.py
trim_wavs
def trim_wavs(org_wav_dir=ORG_WAV_DIR, tgt_wav_dir=TGT_WAV_DIR, org_xml_dir=ORG_XML_DIR): """ Extracts sentence-level transcriptions, translations and wavs from the Na Pangloss XML and WAV files. But otherwise doesn't preprocess them.""" logging.info("Trimming wavs...") if not os.path.exists(os.path.join(tgt_wav_dir, "TEXT")): os.makedirs(os.path.join(tgt_wav_dir, "TEXT")) if not os.path.exists(os.path.join(tgt_wav_dir, "WORDLIST")): os.makedirs(os.path.join(tgt_wav_dir, "WORDLIST")) for fn in os.listdir(org_xml_dir): path = os.path.join(org_xml_dir, fn) prefix, _ = os.path.splitext(fn) if os.path.isdir(path): continue if not path.endswith(".xml"): continue logging.info("Trimming wavs from {}".format(fn)) rec_type, _, times, _ = pangloss.get_sents_times_and_translations(path) # Extract the wavs given the times. for i, (start_time, end_time) in enumerate(times): if prefix.endswith("PLUSEGG"): in_wav_path = os.path.join(org_wav_dir, prefix.upper()[:-len("PLUSEGG")]) + ".wav" else: in_wav_path = os.path.join(org_wav_dir, prefix.upper()) + ".wav" headmic_path = os.path.join(org_wav_dir, prefix.upper()) + "_HEADMIC.wav" if os.path.isfile(headmic_path): in_wav_path = headmic_path out_wav_path = os.path.join(tgt_wav_dir, rec_type, "%s.%d.wav" % (prefix, i)) if not os.path.isfile(in_wav_path): raise PersephoneException("{} not a file.".format(in_wav_path)) start_time = start_time * ureg.seconds end_time = end_time * ureg.seconds wav.trim_wav_ms(Path(in_wav_path), Path(out_wav_path), start_time.to(ureg.milliseconds).magnitude, end_time.to(ureg.milliseconds).magnitude)
python
def trim_wavs(org_wav_dir=ORG_WAV_DIR, tgt_wav_dir=TGT_WAV_DIR, org_xml_dir=ORG_XML_DIR): """ Extracts sentence-level transcriptions, translations and wavs from the Na Pangloss XML and WAV files. But otherwise doesn't preprocess them.""" logging.info("Trimming wavs...") if not os.path.exists(os.path.join(tgt_wav_dir, "TEXT")): os.makedirs(os.path.join(tgt_wav_dir, "TEXT")) if not os.path.exists(os.path.join(tgt_wav_dir, "WORDLIST")): os.makedirs(os.path.join(tgt_wav_dir, "WORDLIST")) for fn in os.listdir(org_xml_dir): path = os.path.join(org_xml_dir, fn) prefix, _ = os.path.splitext(fn) if os.path.isdir(path): continue if not path.endswith(".xml"): continue logging.info("Trimming wavs from {}".format(fn)) rec_type, _, times, _ = pangloss.get_sents_times_and_translations(path) # Extract the wavs given the times. for i, (start_time, end_time) in enumerate(times): if prefix.endswith("PLUSEGG"): in_wav_path = os.path.join(org_wav_dir, prefix.upper()[:-len("PLUSEGG")]) + ".wav" else: in_wav_path = os.path.join(org_wav_dir, prefix.upper()) + ".wav" headmic_path = os.path.join(org_wav_dir, prefix.upper()) + "_HEADMIC.wav" if os.path.isfile(headmic_path): in_wav_path = headmic_path out_wav_path = os.path.join(tgt_wav_dir, rec_type, "%s.%d.wav" % (prefix, i)) if not os.path.isfile(in_wav_path): raise PersephoneException("{} not a file.".format(in_wav_path)) start_time = start_time * ureg.seconds end_time = end_time * ureg.seconds wav.trim_wav_ms(Path(in_wav_path), Path(out_wav_path), start_time.to(ureg.milliseconds).magnitude, end_time.to(ureg.milliseconds).magnitude)
[ "def", "trim_wavs", "(", "org_wav_dir", "=", "ORG_WAV_DIR", ",", "tgt_wav_dir", "=", "TGT_WAV_DIR", ",", "org_xml_dir", "=", "ORG_XML_DIR", ")", ":", "logging", ".", "info", "(", "\"Trimming wavs...\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "tgt_wav_dir", ",", "\"TEXT\"", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "tgt_wav_dir", ",", "\"TEXT\"", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "tgt_wav_dir", ",", "\"WORDLIST\"", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "tgt_wav_dir", ",", "\"WORDLIST\"", ")", ")", "for", "fn", "in", "os", ".", "listdir", "(", "org_xml_dir", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "org_xml_dir", ",", "fn", ")", "prefix", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "fn", ")", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "continue", "if", "not", "path", ".", "endswith", "(", "\".xml\"", ")", ":", "continue", "logging", ".", "info", "(", "\"Trimming wavs from {}\"", ".", "format", "(", "fn", ")", ")", "rec_type", ",", "_", ",", "times", ",", "_", "=", "pangloss", ".", "get_sents_times_and_translations", "(", "path", ")", "# Extract the wavs given the times.", "for", "i", ",", "(", "start_time", ",", "end_time", ")", "in", "enumerate", "(", "times", ")", ":", "if", "prefix", ".", "endswith", "(", "\"PLUSEGG\"", ")", ":", "in_wav_path", "=", "os", ".", "path", ".", "join", "(", "org_wav_dir", ",", "prefix", ".", "upper", "(", ")", "[", ":", "-", "len", "(", "\"PLUSEGG\"", ")", "]", ")", "+", "\".wav\"", "else", ":", "in_wav_path", "=", "os", ".", "path", ".", "join", "(", "org_wav_dir", ",", "prefix", ".", "upper", "(", ")", ")", "+", "\".wav\"", "headmic_path", "=", "os", ".", "path", ".", "join", "(", "org_wav_dir", ",", "prefix", ".", "upper", "(", ")", ")", "+", "\"_HEADMIC.wav\"", "if", "os", ".", "path", ".", "isfile", "(", "headmic_path", ")", ":", "in_wav_path", "=", "headmic_path", "out_wav_path", "=", "os", ".", "path", ".", "join", "(", "tgt_wav_dir", ",", "rec_type", ",", "\"%s.%d.wav\"", "%", "(", "prefix", ",", "i", ")", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "in_wav_path", ")", ":", "raise", "PersephoneException", "(", "\"{} not a file.\"", ".", "format", "(", "in_wav_path", ")", ")", "start_time", "=", "start_time", "*", "ureg", ".", "seconds", "end_time", "=", "end_time", "*", "ureg", ".", "seconds", "wav", ".", "trim_wav_ms", "(", "Path", "(", "in_wav_path", ")", ",", "Path", "(", "out_wav_path", ")", ",", "start_time", ".", "to", "(", "ureg", ".", "milliseconds", ")", ".", "magnitude", ",", "end_time", ".", "to", "(", "ureg", ".", "milliseconds", ")", ".", "magnitude", ")" ]
Extracts sentence-level transcriptions, translations and wavs from the Na Pangloss XML and WAV files. But otherwise doesn't preprocess them.
[ "Extracts", "sentence", "-", "level", "transcriptions", "translations", "and", "wavs", "from", "the", "Na", "Pangloss", "XML", "and", "WAV", "files", ".", "But", "otherwise", "doesn", "t", "preprocess", "them", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L222-L265
train
persephone-tools/persephone
persephone/datasets/na.py
prepare_labels
def prepare_labels(label_type, org_xml_dir=ORG_XML_DIR, label_dir=LABEL_DIR): """ Prepare the neural network output targets.""" if not os.path.exists(os.path.join(label_dir, "TEXT")): os.makedirs(os.path.join(label_dir, "TEXT")) if not os.path.exists(os.path.join(label_dir, "WORDLIST")): os.makedirs(os.path.join(label_dir, "WORDLIST")) for path in Path(org_xml_dir).glob("*.xml"): fn = path.name prefix, _ = os.path.splitext(fn) rec_type, sents, _, _ = pangloss.get_sents_times_and_translations(str(path)) # Write the sentence transcriptions to file sents = [preprocess_na(sent, label_type) for sent in sents] for i, sent in enumerate(sents): if sent.strip() == "": # Then there's no transcription, so ignore this. continue out_fn = "%s.%d.%s" % (prefix, i, label_type) sent_path = os.path.join(label_dir, rec_type, out_fn) with open(sent_path, "w") as sent_f: print(sent, file=sent_f)
python
def prepare_labels(label_type, org_xml_dir=ORG_XML_DIR, label_dir=LABEL_DIR): """ Prepare the neural network output targets.""" if not os.path.exists(os.path.join(label_dir, "TEXT")): os.makedirs(os.path.join(label_dir, "TEXT")) if not os.path.exists(os.path.join(label_dir, "WORDLIST")): os.makedirs(os.path.join(label_dir, "WORDLIST")) for path in Path(org_xml_dir).glob("*.xml"): fn = path.name prefix, _ = os.path.splitext(fn) rec_type, sents, _, _ = pangloss.get_sents_times_and_translations(str(path)) # Write the sentence transcriptions to file sents = [preprocess_na(sent, label_type) for sent in sents] for i, sent in enumerate(sents): if sent.strip() == "": # Then there's no transcription, so ignore this. continue out_fn = "%s.%d.%s" % (prefix, i, label_type) sent_path = os.path.join(label_dir, rec_type, out_fn) with open(sent_path, "w") as sent_f: print(sent, file=sent_f)
[ "def", "prepare_labels", "(", "label_type", ",", "org_xml_dir", "=", "ORG_XML_DIR", ",", "label_dir", "=", "LABEL_DIR", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "label_dir", ",", "\"TEXT\"", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "label_dir", ",", "\"TEXT\"", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "label_dir", ",", "\"WORDLIST\"", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "label_dir", ",", "\"WORDLIST\"", ")", ")", "for", "path", "in", "Path", "(", "org_xml_dir", ")", ".", "glob", "(", "\"*.xml\"", ")", ":", "fn", "=", "path", ".", "name", "prefix", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "fn", ")", "rec_type", ",", "sents", ",", "_", ",", "_", "=", "pangloss", ".", "get_sents_times_and_translations", "(", "str", "(", "path", ")", ")", "# Write the sentence transcriptions to file", "sents", "=", "[", "preprocess_na", "(", "sent", ",", "label_type", ")", "for", "sent", "in", "sents", "]", "for", "i", ",", "sent", "in", "enumerate", "(", "sents", ")", ":", "if", "sent", ".", "strip", "(", ")", "==", "\"\"", ":", "# Then there's no transcription, so ignore this.", "continue", "out_fn", "=", "\"%s.%d.%s\"", "%", "(", "prefix", ",", "i", ",", "label_type", ")", "sent_path", "=", "os", ".", "path", ".", "join", "(", "label_dir", ",", "rec_type", ",", "out_fn", ")", "with", "open", "(", "sent_path", ",", "\"w\"", ")", "as", "sent_f", ":", "print", "(", "sent", ",", "file", "=", "sent_f", ")" ]
Prepare the neural network output targets.
[ "Prepare", "the", "neural", "network", "output", "targets", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L267-L289
train
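A sketch of invoking label preparation, assuming the Pangloss Na XML files are present under the default ORG_XML_DIR:

from persephone.datasets.na import prepare_labels

# Writes one "<prefix>.<i>.phonemes" file per non-empty sentence transcription
# under the TEXT/ and WORDLIST/ subdirectories of LABEL_DIR.
prepare_labels("phonemes")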
persephone-tools/persephone
persephone/datasets/na.py
prepare_untran
def prepare_untran(feat_type, tgt_dir, untran_dir): """ Preprocesses untranscribed audio.""" org_dir = str(untran_dir) wav_dir = os.path.join(str(tgt_dir), "wav", "untranscribed") feat_dir = os.path.join(str(tgt_dir), "feat", "untranscribed") if not os.path.isdir(wav_dir): os.makedirs(wav_dir) if not os.path.isdir(feat_dir): os.makedirs(feat_dir) # Standardize into wav files for fn in os.listdir(org_dir): in_path = os.path.join(org_dir, fn) prefix, _ = os.path.splitext(fn) mono16k_wav_path = os.path.join(wav_dir, "%s.wav" % prefix) if not os.path.isfile(mono16k_wav_path): feat_extract.convert_wav(Path(in_path), Path(mono16k_wav_path)) # Split up the wavs and write prefixes to prefix file. wav_fns = os.listdir(wav_dir) with (tgt_dir / "untranscribed_prefixes.txt").open("w") as prefix_f: for fn in wav_fns: in_fn = os.path.join(wav_dir, fn) prefix, _ = os.path.splitext(fn) # Split into sub-wavs and perform feat extraction. split_id = 0 start, end = 0, 10 #in seconds length = utils.wav_length(in_fn) while True: sub_wav_prefix = "{}.{}".format(prefix, split_id) print(sub_wav_prefix, file=prefix_f) out_fn = os.path.join(feat_dir, "{}.wav".format(sub_wav_prefix)) start_time = start * ureg.seconds end_time = end * ureg.seconds if not Path(out_fn).is_file(): wav.trim_wav_ms(Path(in_fn), Path(out_fn), start_time.to(ureg.milliseconds).magnitude, end_time.to(ureg.milliseconds).magnitude) if end > length: break start += 10 end += 10 split_id += 1 # Do feat extraction. feat_extract.from_dir(Path(os.path.join(feat_dir)), feat_type=feat_type)
python
def prepare_untran(feat_type, tgt_dir, untran_dir): """ Preprocesses untranscribed audio.""" org_dir = str(untran_dir) wav_dir = os.path.join(str(tgt_dir), "wav", "untranscribed") feat_dir = os.path.join(str(tgt_dir), "feat", "untranscribed") if not os.path.isdir(wav_dir): os.makedirs(wav_dir) if not os.path.isdir(feat_dir): os.makedirs(feat_dir) # Standardize into wav files for fn in os.listdir(org_dir): in_path = os.path.join(org_dir, fn) prefix, _ = os.path.splitext(fn) mono16k_wav_path = os.path.join(wav_dir, "%s.wav" % prefix) if not os.path.isfile(mono16k_wav_path): feat_extract.convert_wav(Path(in_path), Path(mono16k_wav_path)) # Split up the wavs and write prefixes to prefix file. wav_fns = os.listdir(wav_dir) with (tgt_dir / "untranscribed_prefixes.txt").open("w") as prefix_f: for fn in wav_fns: in_fn = os.path.join(wav_dir, fn) prefix, _ = os.path.splitext(fn) # Split into sub-wavs and perform feat extraction. split_id = 0 start, end = 0, 10 #in seconds length = utils.wav_length(in_fn) while True: sub_wav_prefix = "{}.{}".format(prefix, split_id) print(sub_wav_prefix, file=prefix_f) out_fn = os.path.join(feat_dir, "{}.wav".format(sub_wav_prefix)) start_time = start * ureg.seconds end_time = end * ureg.seconds if not Path(out_fn).is_file(): wav.trim_wav_ms(Path(in_fn), Path(out_fn), start_time.to(ureg.milliseconds).magnitude, end_time.to(ureg.milliseconds).magnitude) if end > length: break start += 10 end += 10 split_id += 1 # Do feat extraction. feat_extract.from_dir(Path(os.path.join(feat_dir)), feat_type=feat_type)
[ "def", "prepare_untran", "(", "feat_type", ",", "tgt_dir", ",", "untran_dir", ")", ":", "org_dir", "=", "str", "(", "untran_dir", ")", "wav_dir", "=", "os", ".", "path", ".", "join", "(", "str", "(", "tgt_dir", ")", ",", "\"wav\"", ",", "\"untranscribed\"", ")", "feat_dir", "=", "os", ".", "path", ".", "join", "(", "str", "(", "tgt_dir", ")", ",", "\"feat\"", ",", "\"untranscribed\"", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "wav_dir", ")", ":", "os", ".", "makedirs", "(", "wav_dir", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "feat_dir", ")", ":", "os", ".", "makedirs", "(", "feat_dir", ")", "# Standardize into wav files", "for", "fn", "in", "os", ".", "listdir", "(", "org_dir", ")", ":", "in_path", "=", "os", ".", "path", ".", "join", "(", "org_dir", ",", "fn", ")", "prefix", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "fn", ")", "mono16k_wav_path", "=", "os", ".", "path", ".", "join", "(", "wav_dir", ",", "\"%s.wav\"", "%", "prefix", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "mono16k_wav_path", ")", ":", "feat_extract", ".", "convert_wav", "(", "Path", "(", "in_path", ")", ",", "Path", "(", "mono16k_wav_path", ")", ")", "# Split up the wavs and write prefixes to prefix file.", "wav_fns", "=", "os", ".", "listdir", "(", "wav_dir", ")", "with", "(", "tgt_dir", "/", "\"untranscribed_prefixes.txt\"", ")", ".", "open", "(", "\"w\"", ")", "as", "prefix_f", ":", "for", "fn", "in", "wav_fns", ":", "in_fn", "=", "os", ".", "path", ".", "join", "(", "wav_dir", ",", "fn", ")", "prefix", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "fn", ")", "# Split into sub-wavs and perform feat extraction.", "split_id", "=", "0", "start", ",", "end", "=", "0", ",", "10", "#in seconds", "length", "=", "utils", ".", "wav_length", "(", "in_fn", ")", "while", "True", ":", "sub_wav_prefix", "=", "\"{}.{}\"", ".", "format", "(", "prefix", ",", "split_id", ")", "print", "(", "sub_wav_prefix", ",", "file", "=", "prefix_f", ")", "out_fn", "=", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "\"{}.wav\"", ".", "format", "(", "sub_wav_prefix", ")", ")", "start_time", "=", "start", "*", "ureg", ".", "seconds", "end_time", "=", "end", "*", "ureg", ".", "seconds", "if", "not", "Path", "(", "out_fn", ")", ".", "is_file", "(", ")", ":", "wav", ".", "trim_wav_ms", "(", "Path", "(", "in_fn", ")", ",", "Path", "(", "out_fn", ")", ",", "start_time", ".", "to", "(", "ureg", ".", "milliseconds", ")", ".", "magnitude", ",", "end_time", ".", "to", "(", "ureg", ".", "milliseconds", ")", ".", "magnitude", ")", "if", "end", ">", "length", ":", "break", "start", "+=", "10", "end", "+=", "10", "split_id", "+=", "1", "# Do feat extraction.", "feat_extract", ".", "from_dir", "(", "Path", "(", "os", ".", "path", ".", "join", "(", "feat_dir", ")", ")", ",", "feat_type", "=", "feat_type", ")" ]
Preprocesses untranscribed audio.
[ "Preprocesses", "untranscribed", "audio", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L292-L337
train
persephone-tools/persephone
persephone/datasets/na.py
prepare_feats
def prepare_feats(feat_type, org_wav_dir=ORG_WAV_DIR, feat_dir=FEAT_DIR, tgt_wav_dir=TGT_WAV_DIR, org_xml_dir=ORG_XML_DIR, label_dir=LABEL_DIR): """ Prepare the input features.""" if not os.path.isdir(TGT_DIR): os.makedirs(TGT_DIR) if not os.path.isdir(FEAT_DIR): os.makedirs(FEAT_DIR) if not os.path.isdir(os.path.join(feat_dir, "WORDLIST")): os.makedirs(os.path.join(feat_dir, "WORDLIST")) if not os.path.isdir(os.path.join(feat_dir, "TEXT")): os.makedirs(os.path.join(feat_dir, "TEXT")) # Extract utterances from WAVS. trim_wavs(org_wav_dir=org_wav_dir, tgt_wav_dir=tgt_wav_dir, org_xml_dir=org_xml_dir) # TODO Currently assumes that the wav trimming from XML has already been # done. prefixes = [] for fn in os.listdir(os.path.join(tgt_wav_dir, "WORDLIST")): if fn.endswith(".wav"): pre, _ = os.path.splitext(fn) prefixes.append(os.path.join("WORDLIST", pre)) for fn in os.listdir(os.path.join(tgt_wav_dir, "TEXT")): if fn.endswith(".wav"): pre, _ = os.path.splitext(fn) prefixes.append(os.path.join("TEXT", pre)) if feat_type=="phonemes_onehot": import numpy as np #prepare_labels("phonemes") for prefix in prefixes: label_fn = os.path.join(label_dir, "%s.phonemes" % prefix) out_fn = os.path.join(feat_dir, "%s.phonemes_onehot" % prefix) try: with open(label_fn) as label_f: labels = label_f.readlines()[0].split() except FileNotFoundError: continue indices = [PHONEMES_TO_INDICES[label] for label in labels] one_hots = [[0]*len(PHONEMES) for _ in labels] for i, index in enumerate(indices): one_hots[i][index] = 1 one_hots = np.array(one_hots) np.save(out_fn, one_hots) else: # Otherwise, for prefix in prefixes: # Convert the wave to 16k mono. wav_fn = os.path.join(tgt_wav_dir, "%s.wav" % prefix) mono16k_wav_fn = os.path.join(feat_dir, "%s.wav" % prefix) if not os.path.isfile(mono16k_wav_fn): logging.info("Normalizing wav {} to a 16k 16KHz mono {}".format( wav_fn, mono16k_wav_fn)) feat_extract.convert_wav(wav_fn, mono16k_wav_fn) # Extract features from the wavs. feat_extract.from_dir(Path(os.path.join(feat_dir, "WORDLIST")), feat_type=feat_type) feat_extract.from_dir(Path(os.path.join(feat_dir, "TEXT")), feat_type=feat_type)
python
def prepare_feats(feat_type, org_wav_dir=ORG_WAV_DIR, feat_dir=FEAT_DIR, tgt_wav_dir=TGT_WAV_DIR, org_xml_dir=ORG_XML_DIR, label_dir=LABEL_DIR): """ Prepare the input features.""" if not os.path.isdir(TGT_DIR): os.makedirs(TGT_DIR) if not os.path.isdir(FEAT_DIR): os.makedirs(FEAT_DIR) if not os.path.isdir(os.path.join(feat_dir, "WORDLIST")): os.makedirs(os.path.join(feat_dir, "WORDLIST")) if not os.path.isdir(os.path.join(feat_dir, "TEXT")): os.makedirs(os.path.join(feat_dir, "TEXT")) # Extract utterances from WAVS. trim_wavs(org_wav_dir=org_wav_dir, tgt_wav_dir=tgt_wav_dir, org_xml_dir=org_xml_dir) # TODO Currently assumes that the wav trimming from XML has already been # done. prefixes = [] for fn in os.listdir(os.path.join(tgt_wav_dir, "WORDLIST")): if fn.endswith(".wav"): pre, _ = os.path.splitext(fn) prefixes.append(os.path.join("WORDLIST", pre)) for fn in os.listdir(os.path.join(tgt_wav_dir, "TEXT")): if fn.endswith(".wav"): pre, _ = os.path.splitext(fn) prefixes.append(os.path.join("TEXT", pre)) if feat_type=="phonemes_onehot": import numpy as np #prepare_labels("phonemes") for prefix in prefixes: label_fn = os.path.join(label_dir, "%s.phonemes" % prefix) out_fn = os.path.join(feat_dir, "%s.phonemes_onehot" % prefix) try: with open(label_fn) as label_f: labels = label_f.readlines()[0].split() except FileNotFoundError: continue indices = [PHONEMES_TO_INDICES[label] for label in labels] one_hots = [[0]*len(PHONEMES) for _ in labels] for i, index in enumerate(indices): one_hots[i][index] = 1 one_hots = np.array(one_hots) np.save(out_fn, one_hots) else: # Otherwise, for prefix in prefixes: # Convert the wave to 16k mono. wav_fn = os.path.join(tgt_wav_dir, "%s.wav" % prefix) mono16k_wav_fn = os.path.join(feat_dir, "%s.wav" % prefix) if not os.path.isfile(mono16k_wav_fn): logging.info("Normalizing wav {} to a 16k 16KHz mono {}".format( wav_fn, mono16k_wav_fn)) feat_extract.convert_wav(wav_fn, mono16k_wav_fn) # Extract features from the wavs. feat_extract.from_dir(Path(os.path.join(feat_dir, "WORDLIST")), feat_type=feat_type) feat_extract.from_dir(Path(os.path.join(feat_dir, "TEXT")), feat_type=feat_type)
[ "def", "prepare_feats", "(", "feat_type", ",", "org_wav_dir", "=", "ORG_WAV_DIR", ",", "feat_dir", "=", "FEAT_DIR", ",", "tgt_wav_dir", "=", "TGT_WAV_DIR", ",", "org_xml_dir", "=", "ORG_XML_DIR", ",", "label_dir", "=", "LABEL_DIR", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "TGT_DIR", ")", ":", "os", ".", "makedirs", "(", "TGT_DIR", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "FEAT_DIR", ")", ":", "os", ".", "makedirs", "(", "FEAT_DIR", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "\"WORDLIST\"", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "\"WORDLIST\"", ")", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "\"TEXT\"", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "\"TEXT\"", ")", ")", "# Extract utterances from WAVS.", "trim_wavs", "(", "org_wav_dir", "=", "org_wav_dir", ",", "tgt_wav_dir", "=", "tgt_wav_dir", ",", "org_xml_dir", "=", "org_xml_dir", ")", "# TODO Currently assumes that the wav trimming from XML has already been", "# done.", "prefixes", "=", "[", "]", "for", "fn", "in", "os", ".", "listdir", "(", "os", ".", "path", ".", "join", "(", "tgt_wav_dir", ",", "\"WORDLIST\"", ")", ")", ":", "if", "fn", ".", "endswith", "(", "\".wav\"", ")", ":", "pre", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "fn", ")", "prefixes", ".", "append", "(", "os", ".", "path", ".", "join", "(", "\"WORDLIST\"", ",", "pre", ")", ")", "for", "fn", "in", "os", ".", "listdir", "(", "os", ".", "path", ".", "join", "(", "tgt_wav_dir", ",", "\"TEXT\"", ")", ")", ":", "if", "fn", ".", "endswith", "(", "\".wav\"", ")", ":", "pre", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "fn", ")", "prefixes", ".", "append", "(", "os", ".", "path", ".", "join", "(", "\"TEXT\"", ",", "pre", ")", ")", "if", "feat_type", "==", "\"phonemes_onehot\"", ":", "import", "numpy", "as", "np", "#prepare_labels(\"phonemes\")", "for", "prefix", "in", "prefixes", ":", "label_fn", "=", "os", ".", "path", ".", "join", "(", "label_dir", ",", "\"%s.phonemes\"", "%", "prefix", ")", "out_fn", "=", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "\"%s.phonemes_onehot\"", "%", "prefix", ")", "try", ":", "with", "open", "(", "label_fn", ")", "as", "label_f", ":", "labels", "=", "label_f", ".", "readlines", "(", ")", "[", "0", "]", ".", "split", "(", ")", "except", "FileNotFoundError", ":", "continue", "indices", "=", "[", "PHONEMES_TO_INDICES", "[", "label", "]", "for", "label", "in", "labels", "]", "one_hots", "=", "[", "[", "0", "]", "*", "len", "(", "PHONEMES", ")", "for", "_", "in", "labels", "]", "for", "i", ",", "index", "in", "enumerate", "(", "indices", ")", ":", "one_hots", "[", "i", "]", "[", "index", "]", "=", "1", "one_hots", "=", "np", ".", "array", "(", "one_hots", ")", "np", ".", "save", "(", "out_fn", ",", "one_hots", ")", "else", ":", "# Otherwise, ", "for", "prefix", "in", "prefixes", ":", "# Convert the wave to 16k mono.", "wav_fn", "=", "os", ".", "path", ".", "join", "(", "tgt_wav_dir", ",", "\"%s.wav\"", "%", "prefix", ")", "mono16k_wav_fn", "=", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "\"%s.wav\"", "%", "prefix", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "mono16k_wav_fn", ")", ":", "logging", ".", "info", "(", "\"Normalizing wav {} to a 16k 16KHz mono {}\"", ".", "format", "(", "wav_fn", ",", "mono16k_wav_fn", 
")", ")", "feat_extract", ".", "convert_wav", "(", "wav_fn", ",", "mono16k_wav_fn", ")", "# Extract features from the wavs.", "feat_extract", ".", "from_dir", "(", "Path", "(", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "\"WORDLIST\"", ")", ")", ",", "feat_type", "=", "feat_type", ")", "feat_extract", ".", "from_dir", "(", "Path", "(", "os", ".", "path", ".", "join", "(", "feat_dir", ",", "\"TEXT\"", ")", ")", ",", "feat_type", "=", "feat_type", ")" ]
Prepare the input features.
[ "Prepare", "the", "input", "features", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L340-L402
train
persephone-tools/persephone
persephone/datasets/na.py
get_story_prefixes
def get_story_prefixes(label_type, label_dir=LABEL_DIR): """ Gets the Na text prefixes. """ prefixes = [prefix for prefix in os.listdir(os.path.join(label_dir, "TEXT")) if prefix.endswith(".%s" % label_type)] prefixes = [os.path.splitext(os.path.join("TEXT", prefix))[0] for prefix in prefixes] return prefixes
python
def get_story_prefixes(label_type, label_dir=LABEL_DIR): """ Gets the Na text prefixes. """ prefixes = [prefix for prefix in os.listdir(os.path.join(label_dir, "TEXT")) if prefix.endswith(".%s" % label_type)] prefixes = [os.path.splitext(os.path.join("TEXT", prefix))[0] for prefix in prefixes] return prefixes
[ "def", "get_story_prefixes", "(", "label_type", ",", "label_dir", "=", "LABEL_DIR", ")", ":", "prefixes", "=", "[", "prefix", "for", "prefix", "in", "os", ".", "listdir", "(", "os", ".", "path", ".", "join", "(", "label_dir", ",", "\"TEXT\"", ")", ")", "if", "prefix", ".", "endswith", "(", "\".%s\"", "%", "label_type", ")", "]", "prefixes", "=", "[", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "join", "(", "\"TEXT\"", ",", "prefix", ")", ")", "[", "0", "]", "for", "prefix", "in", "prefixes", "]", "return", "prefixes" ]
Gets the Na text prefixes.
[ "Gets", "the", "Na", "text", "prefixes", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L404-L410
train
persephone-tools/persephone
persephone/datasets/na.py
get_stories
def get_stories(label_type): """ Returns a list of the stories in the Na corpus. """ prefixes = get_story_prefixes(label_type) texts = list(set([prefix.split(".")[0].split("/")[1] for prefix in prefixes])) return texts
python
def get_stories(label_type): """ Returns a list of the stories in the Na corpus. """ prefixes = get_story_prefixes(label_type) texts = list(set([prefix.split(".")[0].split("/")[1] for prefix in prefixes])) return texts
[ "def", "get_stories", "(", "label_type", ")", ":", "prefixes", "=", "get_story_prefixes", "(", "label_type", ")", "texts", "=", "list", "(", "set", "(", "[", "prefix", ".", "split", "(", "\".\"", ")", "[", "0", "]", ".", "split", "(", "\"/\"", ")", "[", "1", "]", "for", "prefix", "in", "prefixes", "]", ")", ")", "return", "texts" ]
Returns a list of the stories in the Na corpus.
[ "Returns", "a", "list", "of", "the", "stories", "in", "the", "Na", "corpus", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L456-L461
train
persephone-tools/persephone
persephone/datasets/na.py
Corpus.make_data_splits
def make_data_splits(self, max_samples, valid_story=None, test_story=None): """Split data into train, valid and test groups""" # TODO Make this also work with wordlists. if valid_story or test_story: if not (valid_story and test_story): raise PersephoneException( "We need a valid story if we specify a test story " "and vice versa. This shouldn't be required but for " "now it is.") train, valid, test = make_story_splits(valid_story, test_story, max_samples, self.label_type, tgt_dir=str(self.tgt_dir)) else: train, valid, test = make_data_splits(self.label_type, train_rec_type=self.train_rec_type, max_samples=max_samples, tgt_dir=str(self.tgt_dir)) self.train_prefixes = train self.valid_prefixes = valid self.test_prefixes = test
python
def make_data_splits(self, max_samples, valid_story=None, test_story=None): """Split data into train, valid and test groups""" # TODO Make this also work with wordlists. if valid_story or test_story: if not (valid_story and test_story): raise PersephoneException( "We need a valid story if we specify a test story " "and vice versa. This shouldn't be required but for " "now it is.") train, valid, test = make_story_splits(valid_story, test_story, max_samples, self.label_type, tgt_dir=str(self.tgt_dir)) else: train, valid, test = make_data_splits(self.label_type, train_rec_type=self.train_rec_type, max_samples=max_samples, tgt_dir=str(self.tgt_dir)) self.train_prefixes = train self.valid_prefixes = valid self.test_prefixes = test
[ "def", "make_data_splits", "(", "self", ",", "max_samples", ",", "valid_story", "=", "None", ",", "test_story", "=", "None", ")", ":", "# TODO Make this also work with wordlists.", "if", "valid_story", "or", "test_story", ":", "if", "not", "(", "valid_story", "and", "test_story", ")", ":", "raise", "PersephoneException", "(", "\"We need a valid story if we specify a test story \"", "\"and vice versa. This shouldn't be required but for \"", "\"now it is.\"", ")", "train", ",", "valid", ",", "test", "=", "make_story_splits", "(", "valid_story", ",", "test_story", ",", "max_samples", ",", "self", ".", "label_type", ",", "tgt_dir", "=", "str", "(", "self", ".", "tgt_dir", ")", ")", "else", ":", "train", ",", "valid", ",", "test", "=", "make_data_splits", "(", "self", ".", "label_type", ",", "train_rec_type", "=", "self", ".", "train_rec_type", ",", "max_samples", "=", "max_samples", ",", "tgt_dir", "=", "str", "(", "self", ".", "tgt_dir", ")", ")", "self", ".", "train_prefixes", "=", "train", "self", ".", "valid_prefixes", "=", "valid", "self", ".", "test_prefixes", "=", "test" ]
Split data into train, valid and test groups
[ "Split", "data", "into", "train", "valid", "and", "test", "groups" ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L537-L558
train
persephone-tools/persephone
persephone/datasets/na.py
Corpus.output_story_prefixes
def output_story_prefixes(self): """ Writes the set of prefixes to a file this is useful for pretty printing in results.latex_output. """ if not self.test_story: raise NotImplementedError( "I want to write the prefixes to a file" "called <test_story>_prefixes.txt, but there's no test_story.") fn = os.path.join(TGT_DIR, "%s_prefixes.txt" % self.test_story) with open(fn, "w") as f: for utter_id in self.test_prefixes: print(utter_id.split("/")[1], file=f)
python
def output_story_prefixes(self): """ Writes the set of prefixes to a file this is useful for pretty printing in results.latex_output. """ if not self.test_story: raise NotImplementedError( "I want to write the prefixes to a file" "called <test_story>_prefixes.txt, but there's no test_story.") fn = os.path.join(TGT_DIR, "%s_prefixes.txt" % self.test_story) with open(fn, "w") as f: for utter_id in self.test_prefixes: print(utter_id.split("/")[1], file=f)
[ "def", "output_story_prefixes", "(", "self", ")", ":", "if", "not", "self", ".", "test_story", ":", "raise", "NotImplementedError", "(", "\"I want to write the prefixes to a file\"", "\"called <test_story>_prefixes.txt, but there's no test_story.\"", ")", "fn", "=", "os", ".", "path", ".", "join", "(", "TGT_DIR", ",", "\"%s_prefixes.txt\"", "%", "self", ".", "test_story", ")", "with", "open", "(", "fn", ",", "\"w\"", ")", "as", "f", ":", "for", "utter_id", "in", "self", ".", "test_prefixes", ":", "print", "(", "utter_id", ".", "split", "(", "\"/\"", ")", "[", "1", "]", ",", "file", "=", "f", ")" ]
Writes the set of prefixes to a file this is useful for pretty printing in results.latex_output.
[ "Writes", "the", "set", "of", "prefixes", "to", "a", "file", "this", "is", "useful", "for", "pretty", "printing", "in", "results", ".", "latex_output", "." ]
f94c63e4d5fe719fb1deba449b177bb299d225fb
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L560-L572
train
KxSystems/pyq
setup.py
add_data_file
def add_data_file(data_files, target, source): """Add an entry to data_files""" for t, f in data_files: if t == target: break else: data_files.append((target, [])) f = data_files[-1][1] if source not in f: f.append(source)
python
def add_data_file(data_files, target, source): """Add an entry to data_files""" for t, f in data_files: if t == target: break else: data_files.append((target, [])) f = data_files[-1][1] if source not in f: f.append(source)
[ "def", "add_data_file", "(", "data_files", ",", "target", ",", "source", ")", ":", "for", "t", ",", "f", "in", "data_files", ":", "if", "t", "==", "target", ":", "break", "else", ":", "data_files", ".", "append", "(", "(", "target", ",", "[", "]", ")", ")", "f", "=", "data_files", "[", "-", "1", "]", "[", "1", "]", "if", "source", "not", "in", "f", ":", "f", ".", "append", "(", "source", ")" ]
Add an entry to data_files
[ "Add", "an", "entry", "to", "data_files" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/setup.py#L145-L154
train
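A usage sketch for add_data_file, assuming the function above is in scope; the target and source paths are made up.

    # Assumes add_data_file above is in scope; paths are made up.
    data_files = []
    add_data_file(data_files, 'q/python', 'src/pyq/a.py')
    add_data_file(data_files, 'q/python', 'src/pyq/a.py')   # duplicate source: no-op
    add_data_file(data_files, 'q/python', 'src/pyq/b.py')
    print(data_files)   # [('q/python', ['src/pyq/a.py', 'src/pyq/b.py'])]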
KxSystems/pyq
setup.py
get_q_home
def get_q_home(env): """Derive q home from the environment""" q_home = env.get('QHOME') if q_home: return q_home for v in ['VIRTUAL_ENV', 'HOME']: prefix = env.get(v) if prefix: q_home = os.path.join(prefix, 'q') if os.path.isdir(q_home): return q_home if WINDOWS: q_home = os.path.join(env['SystemDrive'], r'\q') if os.path.isdir(q_home): return q_home raise RuntimeError('No suitable QHOME.')
python
def get_q_home(env): """Derive q home from the environment""" q_home = env.get('QHOME') if q_home: return q_home for v in ['VIRTUAL_ENV', 'HOME']: prefix = env.get(v) if prefix: q_home = os.path.join(prefix, 'q') if os.path.isdir(q_home): return q_home if WINDOWS: q_home = os.path.join(env['SystemDrive'], r'\q') if os.path.isdir(q_home): return q_home raise RuntimeError('No suitable QHOME.')
[ "def", "get_q_home", "(", "env", ")", ":", "q_home", "=", "env", ".", "get", "(", "'QHOME'", ")", "if", "q_home", ":", "return", "q_home", "for", "v", "in", "[", "'VIRTUAL_ENV'", ",", "'HOME'", "]", ":", "prefix", "=", "env", ".", "get", "(", "v", ")", "if", "prefix", ":", "q_home", "=", "os", ".", "path", ".", "join", "(", "prefix", ",", "'q'", ")", "if", "os", ".", "path", ".", "isdir", "(", "q_home", ")", ":", "return", "q_home", "if", "WINDOWS", ":", "q_home", "=", "os", ".", "path", ".", "join", "(", "env", "[", "'SystemDrive'", "]", ",", "r'\\q'", ")", "if", "os", ".", "path", ".", "isdir", "(", "q_home", ")", ":", "return", "q_home", "raise", "RuntimeError", "(", "'No suitable QHOME.'", ")" ]
Derive q home from the environment
[ "Derive", "q", "home", "from", "the", "environment" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/setup.py#L185-L200
train
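A sketch of get_q_home's lookup order on a POSIX host, assuming the function above is in scope; the QHOME path is illustrative, and the VIRTUAL_ENV/HOME fallbacks additionally require the q directory to exist on disk.

    # Assumes get_q_home above is in scope; run on a non-Windows host.
    print(get_q_home({'QHOME': '/opt/kdb/q'}))   # '/opt/kdb/q' -- QHOME wins outright
    try:
        get_q_home({})    # no QHOME/VIRTUAL_ENV/HOME and not Windows
    except RuntimeError as e:
        print(e)          # No suitable QHOME.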
KxSystems/pyq
setup.py
get_q_version
def get_q_version(q_home): """Return version of q installed at q_home""" with open(os.path.join(q_home, 'q.k')) as f: for line in f: if line.startswith('k:'): return line[2:5] return '2.2'
python
def get_q_version(q_home): """Return version of q installed at q_home""" with open(os.path.join(q_home, 'q.k')) as f: for line in f: if line.startswith('k:'): return line[2:5] return '2.2'
[ "def", "get_q_version", "(", "q_home", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "q_home", ",", "'q.k'", ")", ")", "as", "f", ":", "for", "line", "in", "f", ":", "if", "line", ".", "startswith", "(", "'k:'", ")", ":", "return", "line", "[", "2", ":", "5", "]", "return", "'2.2'" ]
Return version of q installed at q_home
[ "Return", "version", "of", "q", "installed", "at", "q_home" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/setup.py#L230-L236
train
KxSystems/pyq
src/pyq/cmd.py
Cmd.precmd
def precmd(self, line): """Support for help""" if line.startswith('help'): if not q("`help in key`.q"): try: q("\\l help.q") except kerr: return '-1"no help available - install help.q"' if line == 'help': line += "`" return line
python
def precmd(self, line): """Support for help""" if line.startswith('help'): if not q("`help in key`.q"): try: q("\\l help.q") except kerr: return '-1"no help available - install help.q"' if line == 'help': line += "`" return line
[ "def", "precmd", "(", "self", ",", "line", ")", ":", "if", "line", ".", "startswith", "(", "'help'", ")", ":", "if", "not", "q", "(", "\"`help in key`.q\"", ")", ":", "try", ":", "q", "(", "\"\\\\l help.q\"", ")", "except", "kerr", ":", "return", "'-1\"no help available - install help.q\"'", "if", "line", "==", "'help'", ":", "line", "+=", "\"`\"", "return", "line" ]
Support for help
[ "Support", "for", "help" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/cmd.py#L35-L45
train
KxSystems/pyq
src/pyq/cmd.py
Cmd.onecmd
def onecmd(self, line): """Interpret the line""" if line == '\\': return True elif line == 'EOF': print('\r', end='') return True else: try: v = q(line) except kerr as e: print("'%s" % e.args[0]) else: if v != q('::'): v.show() return False
python
def onecmd(self, line): """Interpret the line""" if line == '\\': return True elif line == 'EOF': print('\r', end='') return True else: try: v = q(line) except kerr as e: print("'%s" % e.args[0]) else: if v != q('::'): v.show() return False
[ "def", "onecmd", "(", "self", ",", "line", ")", ":", "if", "line", "==", "'\\\\'", ":", "return", "True", "elif", "line", "==", "'EOF'", ":", "print", "(", "'\\r'", ",", "end", "=", "''", ")", "return", "True", "else", ":", "try", ":", "v", "=", "q", "(", "line", ")", "except", "kerr", "as", "e", ":", "print", "(", "\"'%s\"", "%", "e", ".", "args", "[", "0", "]", ")", "else", ":", "if", "v", "!=", "q", "(", "'::'", ")", ":", "v", ".", "show", "(", ")", "return", "False" ]
Interpret the line
[ "Interpret", "the", "line" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/cmd.py#L47-L62
train
KxSystems/pyq
src/pyq/_pt_run.py
run
def run(q_prompt=False): """Run a prompt-toolkit based REPL""" lines, columns = console_size() q(r'\c %d %d' % (lines, columns)) if len(sys.argv) > 1: try: q(r'\l %s' % sys.argv[1]) except kerr as e: print(e) raise SystemExit(1) else: del sys.argv[1] if q_prompt: q() ptp.run()
python
def run(q_prompt=False): """Run a prompt-toolkit based REPL""" lines, columns = console_size() q(r'\c %d %d' % (lines, columns)) if len(sys.argv) > 1: try: q(r'\l %s' % sys.argv[1]) except kerr as e: print(e) raise SystemExit(1) else: del sys.argv[1] if q_prompt: q() ptp.run()
[ "def", "run", "(", "q_prompt", "=", "False", ")", ":", "lines", ",", "columns", "=", "console_size", "(", ")", "q", "(", "r'\\c %d %d'", "%", "(", "lines", ",", "columns", ")", ")", "if", "len", "(", "sys", ".", "argv", ")", ">", "1", ":", "try", ":", "q", "(", "r'\\l %s'", "%", "sys", ".", "argv", "[", "1", "]", ")", "except", "kerr", "as", "e", ":", "print", "(", "e", ")", "raise", "SystemExit", "(", "1", ")", "else", ":", "del", "sys", ".", "argv", "[", "1", "]", "if", "q_prompt", ":", "q", "(", ")", "ptp", ".", "run", "(", ")" ]
Run a prompt-toolkit based REPL
[ "Run", "a", "prompt", "-", "toolkit", "based", "REPL" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/_pt_run.py#L32-L46
train
KxSystems/pyq
src/pyq/_n.py
get_unit
def get_unit(a): """Extract the time unit from array's dtype""" typestr = a.dtype.str i = typestr.find('[') if i == -1: raise TypeError("Expected a datetime64 array, not %s", a.dtype) return typestr[i + 1: -1]
python
def get_unit(a): """Extract the time unit from array's dtype""" typestr = a.dtype.str i = typestr.find('[') if i == -1: raise TypeError("Expected a datetime64 array, not %s", a.dtype) return typestr[i + 1: -1]
[ "def", "get_unit", "(", "a", ")", ":", "typestr", "=", "a", ".", "dtype", ".", "str", "i", "=", "typestr", ".", "find", "(", "'['", ")", "if", "i", "==", "-", "1", ":", "raise", "TypeError", "(", "\"Expected a datetime64 array, not %s\"", ",", "a", ".", "dtype", ")", "return", "typestr", "[", "i", "+", "1", ":", "-", "1", "]" ]
Extract the time unit from array's dtype
[ "Extract", "the", "time", "unit", "from", "array", "s", "dtype" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/_n.py#L50-L56
train
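A self-contained look at the dtype string that get_unit slices apart; the array contents are arbitrary.

    import numpy as np

    a = np.array(['2001-01-02'], dtype='datetime64[D]')
    print(a.dtype.str)             # '<M8[D]' -- the unit sits between '[' and ']'
    i = a.dtype.str.find('[')
    print(a.dtype.str[i + 1:-1])   # 'D', which is what get_unit(a) returns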
KxSystems/pyq
src/pyq/_n.py
k2a
def k2a(a, x): """Rescale data from a K object x to array a. """ func, scale = None, 1 t = abs(x._t) # timestamp (12), month (13), date (14) or datetime (15) if 12 <= t <= 15: unit = get_unit(a) attr, shift, func, scale = _UNIT[unit] a[:] = getattr(x, attr).data a += shift # timespan (16), minute (17), second (18) or time (19) elif 16 <= t <= 19: unit = get_unit(a) func, scale = _SCALE[unit] a[:] = x.timespan.data else: a[:] = list(x) if func is not None: func = getattr(numpy, func) a[:] = func(a.view(dtype='i8'), scale) if a.dtype.char in 'mM': n = x.null if n.any: a[n] = None
python
def k2a(a, x): """Rescale data from a K object x to array a. """ func, scale = None, 1 t = abs(x._t) # timestamp (12), month (13), date (14) or datetime (15) if 12 <= t <= 15: unit = get_unit(a) attr, shift, func, scale = _UNIT[unit] a[:] = getattr(x, attr).data a += shift # timespan (16), minute (17), second (18) or time (19) elif 16 <= t <= 19: unit = get_unit(a) func, scale = _SCALE[unit] a[:] = x.timespan.data else: a[:] = list(x) if func is not None: func = getattr(numpy, func) a[:] = func(a.view(dtype='i8'), scale) if a.dtype.char in 'mM': n = x.null if n.any: a[n] = None
[ "def", "k2a", "(", "a", ",", "x", ")", ":", "func", ",", "scale", "=", "None", ",", "1", "t", "=", "abs", "(", "x", ".", "_t", ")", "# timestamp (12), month (13), date (14) or datetime (15)", "if", "12", "<=", "t", "<=", "15", ":", "unit", "=", "get_unit", "(", "a", ")", "attr", ",", "shift", ",", "func", ",", "scale", "=", "_UNIT", "[", "unit", "]", "a", "[", ":", "]", "=", "getattr", "(", "x", ",", "attr", ")", ".", "data", "a", "+=", "shift", "# timespan (16), minute (17), second (18) or time (19)", "elif", "16", "<=", "t", "<=", "19", ":", "unit", "=", "get_unit", "(", "a", ")", "func", ",", "scale", "=", "_SCALE", "[", "unit", "]", "a", "[", ":", "]", "=", "x", ".", "timespan", ".", "data", "else", ":", "a", "[", ":", "]", "=", "list", "(", "x", ")", "if", "func", "is", "not", "None", ":", "func", "=", "getattr", "(", "numpy", ",", "func", ")", "a", "[", ":", "]", "=", "func", "(", "a", ".", "view", "(", "dtype", "=", "'i8'", ")", ",", "scale", ")", "if", "a", ".", "dtype", ".", "char", "in", "'mM'", ":", "n", "=", "x", ".", "null", "if", "n", ".", "any", ":", "a", "[", "n", "]", "=", "None" ]
Rescale data from a K object x to array a.
[ "Rescale", "data", "from", "a", "K", "object", "x", "to", "array", "a", "." ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/_n.py#L118-L145
train
KxSystems/pyq
src/pyq/__init__.py
K.show
def show(self, start=0, geometry=None, output=None): """pretty-print data to the console (similar to q.show, but uses python stdout by default) >>> x = q('([k:`x`y`z]a:1 2 3;b:10 20 30)') >>> x.show() # doctest: +NORMALIZE_WHITESPACE k| a b -| ---- x| 1 10 y| 2 20 z| 3 30 the first optional argument, 'start' specifies the first row to be printed (negative means from the end) >>> x.show(2) # doctest: +NORMALIZE_WHITESPACE k| a b -| ---- z| 3 30 >>> x.show(-2) # doctest: +NORMALIZE_WHITESPACE k| a b -| ---- y| 2 20 z| 3 30 the geometry is the height and width of the console >>> x.show(geometry=[4, 6]) k| a.. -| -.. x| 1.. .. """ if output is None: output = sys.stdout if geometry is None: geometry = q.value(kp("\\c")) else: geometry = self._I(geometry) if start < 0: start += q.count(self) # Make sure nil is not passed to a q function if self._id() != nil._id(): r = self._show(geometry, start) else: r = '::\n' if isinstance(output, type): return output(r) try: output.write(r) except TypeError: output.write(str(r))
python
def show(self, start=0, geometry=None, output=None): """pretty-print data to the console (similar to q.show, but uses python stdout by default) >>> x = q('([k:`x`y`z]a:1 2 3;b:10 20 30)') >>> x.show() # doctest: +NORMALIZE_WHITESPACE k| a b -| ---- x| 1 10 y| 2 20 z| 3 30 the first optional argument, 'start' specifies the first row to be printed (negative means from the end) >>> x.show(2) # doctest: +NORMALIZE_WHITESPACE k| a b -| ---- z| 3 30 >>> x.show(-2) # doctest: +NORMALIZE_WHITESPACE k| a b -| ---- y| 2 20 z| 3 30 the geometry is the height and width of the console >>> x.show(geometry=[4, 6]) k| a.. -| -.. x| 1.. .. """ if output is None: output = sys.stdout if geometry is None: geometry = q.value(kp("\\c")) else: geometry = self._I(geometry) if start < 0: start += q.count(self) # Make sure nil is not passed to a q function if self._id() != nil._id(): r = self._show(geometry, start) else: r = '::\n' if isinstance(output, type): return output(r) try: output.write(r) except TypeError: output.write(str(r))
[ "def", "show", "(", "self", ",", "start", "=", "0", ",", "geometry", "=", "None", ",", "output", "=", "None", ")", ":", "if", "output", "is", "None", ":", "output", "=", "sys", ".", "stdout", "if", "geometry", "is", "None", ":", "geometry", "=", "q", ".", "value", "(", "kp", "(", "\"\\\\c\"", ")", ")", "else", ":", "geometry", "=", "self", ".", "_I", "(", "geometry", ")", "if", "start", "<", "0", ":", "start", "+=", "q", ".", "count", "(", "self", ")", "# Make sure nil is not passed to a q function", "if", "self", ".", "_id", "(", ")", "!=", "nil", ".", "_id", "(", ")", ":", "r", "=", "self", ".", "_show", "(", "geometry", ",", "start", ")", "else", ":", "r", "=", "'::\\n'", "if", "isinstance", "(", "output", ",", "type", ")", ":", "return", "output", "(", "r", ")", "try", ":", "output", ".", "write", "(", "r", ")", "except", "TypeError", ":", "output", ".", "write", "(", "str", "(", "r", ")", ")" ]
pretty-print data to the console (similar to q.show, but uses python stdout by default) >>> x = q('([k:`x`y`z]a:1 2 3;b:10 20 30)') >>> x.show() # doctest: +NORMALIZE_WHITESPACE k| a b -| ---- x| 1 10 y| 2 20 z| 3 30 the first optional argument, 'start' specifies the first row to be printed (negative means from the end) >>> x.show(2) # doctest: +NORMALIZE_WHITESPACE k| a b -| ---- z| 3 30 >>> x.show(-2) # doctest: +NORMALIZE_WHITESPACE k| a b -| ---- y| 2 20 z| 3 30 the geometry is the height and width of the console >>> x.show(geometry=[4, 6]) k| a.. -| -.. x| 1.. ..
[ "pretty", "-", "print", "data", "to", "the", "console" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/__init__.py#L377-L435
train
KxSystems/pyq
src/pyq/__init__.py
K.select
def select(self, columns=(), by=(), where=(), **kwds): """select from self >>> t = q('([]a:1 2 3; b:10 20 30)') >>> t.select('a', where='b > 20').show() a - 3 """ return self._seu('select', columns, by, where, kwds)
python
def select(self, columns=(), by=(), where=(), **kwds): """select from self >>> t = q('([]a:1 2 3; b:10 20 30)') >>> t.select('a', where='b > 20').show() a - 3 """ return self._seu('select', columns, by, where, kwds)
[ "def", "select", "(", "self", ",", "columns", "=", "(", ")", ",", "by", "=", "(", ")", ",", "where", "=", "(", ")", ",", "*", "*", "kwds", ")", ":", "return", "self", ".", "_seu", "(", "'select'", ",", "columns", ",", "by", ",", "where", ",", "kwds", ")" ]
select from self >>> t = q('([]a:1 2 3; b:10 20 30)') >>> t.select('a', where='b > 20').show() a - 3
[ "select", "from", "self" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/__init__.py#L465-L474
train
KxSystems/pyq
src/pyq/__init__.py
K.exec_
def exec_(self, columns=(), by=(), where=(), **kwds): """exec from self >>> t = q('([]a:1 2 3; b:10 20 30)') >>> t.exec_('a', where='b > 10').show() 2 3 """ return self._seu('exec', columns, by, where, kwds)
python
def exec_(self, columns=(), by=(), where=(), **kwds): """exec from self >>> t = q('([]a:1 2 3; b:10 20 30)') >>> t.exec_('a', where='b > 10').show() 2 3 """ return self._seu('exec', columns, by, where, kwds)
[ "def", "exec_", "(", "self", ",", "columns", "=", "(", ")", ",", "by", "=", "(", ")", ",", "where", "=", "(", ")", ",", "*", "*", "kwds", ")", ":", "return", "self", ".", "_seu", "(", "'exec'", ",", "columns", ",", "by", ",", "where", ",", "kwds", ")" ]
exec from self >>> t = q('([]a:1 2 3; b:10 20 30)') >>> t.exec_('a', where='b > 10').show() 2 3
[ "exec", "from", "self" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/__init__.py#L476-L483
train
KxSystems/pyq
src/pyq/__init__.py
K.update
def update(self, columns=(), by=(), where=(), **kwds): """update from self >>> t = q('([]a:1 2 3; b:10 20 30)') >>> t.update('a*2', ... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE a b ---- 1 10 2 20 6 30 """ return self._seu('update', columns, by, where, kwds)
python
def update(self, columns=(), by=(), where=(), **kwds): """update from self >>> t = q('([]a:1 2 3; b:10 20 30)') >>> t.update('a*2', ... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE a b ---- 1 10 2 20 6 30 """ return self._seu('update', columns, by, where, kwds)
[ "def", "update", "(", "self", ",", "columns", "=", "(", ")", ",", "by", "=", "(", ")", ",", "where", "=", "(", ")", ",", "*", "*", "kwds", ")", ":", "return", "self", ".", "_seu", "(", "'update'", ",", "columns", ",", "by", ",", "where", ",", "kwds", ")" ]
update from self >>> t = q('([]a:1 2 3; b:10 20 30)') >>> t.update('a*2', ... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE a b ---- 1 10 2 20 6 30
[ "update", "from", "self" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/__init__.py#L485-L497
train
KxSystems/pyq
src/pyq/__init__.py
K.dict
def dict(cls, *args, **kwds): """Construct a q dictionary K.dict() -> new empty q dictionary (q('()!()') K.dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs K.dict(iterable) -> new dictionary initialized from an iterable yielding (key, value) pairs K.dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. For example: K.dict(one=1, two=2) """ if args: if len(args) > 1: raise TypeError("Too many positional arguments") x = args[0] keys = [] vals = [] try: x_keys = x.keys except AttributeError: for k, v in x: keys.append(k) vals.append(v) else: keys = x_keys() vals = [x[k] for k in keys] return q('!', keys, vals) else: if kwds: keys = [] vals = [] for k, v in kwds.items(): keys.append(k) vals.append(v) return q('!', keys, vals) else: return q('()!()')
python
def dict(cls, *args, **kwds): """Construct a q dictionary K.dict() -> new empty q dictionary (q('()!()') K.dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs K.dict(iterable) -> new dictionary initialized from an iterable yielding (key, value) pairs K.dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. For example: K.dict(one=1, two=2) """ if args: if len(args) > 1: raise TypeError("Too many positional arguments") x = args[0] keys = [] vals = [] try: x_keys = x.keys except AttributeError: for k, v in x: keys.append(k) vals.append(v) else: keys = x_keys() vals = [x[k] for k in keys] return q('!', keys, vals) else: if kwds: keys = [] vals = [] for k, v in kwds.items(): keys.append(k) vals.append(v) return q('!', keys, vals) else: return q('()!()')
[ "def", "dict", "(", "cls", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "if", "args", ":", "if", "len", "(", "args", ")", ">", "1", ":", "raise", "TypeError", "(", "\"Too many positional arguments\"", ")", "x", "=", "args", "[", "0", "]", "keys", "=", "[", "]", "vals", "=", "[", "]", "try", ":", "x_keys", "=", "x", ".", "keys", "except", "AttributeError", ":", "for", "k", ",", "v", "in", "x", ":", "keys", ".", "append", "(", "k", ")", "vals", ".", "append", "(", "v", ")", "else", ":", "keys", "=", "x_keys", "(", ")", "vals", "=", "[", "x", "[", "k", "]", "for", "k", "in", "keys", "]", "return", "q", "(", "'!'", ",", "keys", ",", "vals", ")", "else", ":", "if", "kwds", ":", "keys", "=", "[", "]", "vals", "=", "[", "]", "for", "k", ",", "v", "in", "kwds", ".", "items", "(", ")", ":", "keys", ".", "append", "(", "k", ")", "vals", ".", "append", "(", "v", ")", "return", "q", "(", "'!'", ",", "keys", ",", "vals", ")", "else", ":", "return", "q", "(", "'()!()'", ")" ]
Construct a q dictionary K.dict() -> new empty q dictionary (q('()!()') K.dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs K.dict(iterable) -> new dictionary initialized from an iterable yielding (key, value) pairs K.dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. For example: K.dict(one=1, two=2)
[ "Construct", "a", "q", "dictionary" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/__init__.py#L558-L595
train
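A usage sketch for K.dict; it assumes a working pyq install with a q interpreter available, and simply exercises the constructor forms listed in the docstring.

    # Needs pyq installed and a q interpreter available.
    from pyq import K

    K.dict()                         # empty q dictionary, i.e. q('()!()')
    K.dict({'a': 1, 'b': 2})         # from a mapping
    K.dict([('a', 1), ('b', 2)])     # from (key, value) pairs
    K.dict(a=1, b=2)                 # from keyword arguments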
KxSystems/pyq
src/pyq/magic.py
logical_lines
def logical_lines(lines): """Merge lines into chunks according to q rules""" if isinstance(lines, string_types): lines = StringIO(lines) buf = [] for line in lines: if buf and not line.startswith(' '): chunk = ''.join(buf).strip() if chunk: yield chunk buf[:] = [] buf.append(line) chunk = ''.join(buf).strip() if chunk: yield chunk
python
def logical_lines(lines): """Merge lines into chunks according to q rules""" if isinstance(lines, string_types): lines = StringIO(lines) buf = [] for line in lines: if buf and not line.startswith(' '): chunk = ''.join(buf).strip() if chunk: yield chunk buf[:] = [] buf.append(line) chunk = ''.join(buf).strip() if chunk: yield chunk
[ "def", "logical_lines", "(", "lines", ")", ":", "if", "isinstance", "(", "lines", ",", "string_types", ")", ":", "lines", "=", "StringIO", "(", "lines", ")", "buf", "=", "[", "]", "for", "line", "in", "lines", ":", "if", "buf", "and", "not", "line", ".", "startswith", "(", "' '", ")", ":", "chunk", "=", "''", ".", "join", "(", "buf", ")", ".", "strip", "(", ")", "if", "chunk", ":", "yield", "chunk", "buf", "[", ":", "]", "=", "[", "]", "buf", ".", "append", "(", "line", ")", "chunk", "=", "''", ".", "join", "(", "buf", ")", ".", "strip", "(", ")", "if", "chunk", ":", "yield", "chunk" ]
Merge lines into chunks according to q rules
[ "Merge", "lines", "into", "chunks", "according", "to", "q", "rules" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/magic.py#L23-L38
train
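Since q continuation lines begin with whitespace, logical_lines folds them into the preceding chunk. A runnable sketch, assuming the function above is in scope; the q snippet itself is arbitrary.

    # Assumes logical_lines above is in scope (it accepts a str or a file).
    src = ("f:{x\n"
           "  +1}\n"
           "g:{2*x}\n")
    print(list(logical_lines(src)))
    # ['f:{x\n  +1}', 'g:{2*x}']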
KxSystems/pyq
src/pyq/magic.py
q
def q(line, cell=None, _ns=None): """Run q code. Options: -l (dir|script) - pre-load database or script -h host:port - execute on the given host -o var - send output to a variable named var. -i var1,..,varN - input variables -1/-2 - redirect stdout/stderr """ if cell is None: return pyq.q(line) if _ns is None: _ns = vars(sys.modules['__main__']) input = output = None preload = [] outs = {} try: h = pyq.q('0i') if line: for opt, value in getopt(line.split(), "h:l:o:i:12")[0]: if opt == '-l': preload.append(value) elif opt == '-h': h = pyq.K(str(':' + value)) elif opt == '-o': output = str(value) # (see #673) elif opt == '-i': input = str(value).split(',') elif opt in ('-1', '-2'): outs[int(opt[1])] = None if outs: if int(h) != 0: raise ValueError("Cannot redirect remote std stream") for fd in outs: tmpfd, tmpfile = mkstemp() try: pyq.q(r'\%d %s' % (fd, tmpfile)) finally: os.unlink(tmpfile) os.close(tmpfd) r = None for script in preload: h(pyq.kp(r"\l " + script)) if input is not None: for chunk in logical_lines(cell): func = "{[%s]%s}" % (';'.join(input), chunk) args = tuple(_ns[i] for i in input) if r != Q_NONE: r.show() r = h((pyq.kp(func),) + args) if outs: _forward_outputs(outs) else: for chunk in logical_lines(cell): if r != Q_NONE: r.show() r = h(pyq.kp(chunk)) if outs: _forward_outputs(outs) except pyq.kerr as e: print("'%s" % e) else: if output is not None: if output.startswith('q.'): pyq.q('@[`.;;:;]', output[2:], r) else: _ns[output] = r else: if r != Q_NONE: return r
python
def q(line, cell=None, _ns=None): """Run q code. Options: -l (dir|script) - pre-load database or script -h host:port - execute on the given host -o var - send output to a variable named var. -i var1,..,varN - input variables -1/-2 - redirect stdout/stderr """ if cell is None: return pyq.q(line) if _ns is None: _ns = vars(sys.modules['__main__']) input = output = None preload = [] outs = {} try: h = pyq.q('0i') if line: for opt, value in getopt(line.split(), "h:l:o:i:12")[0]: if opt == '-l': preload.append(value) elif opt == '-h': h = pyq.K(str(':' + value)) elif opt == '-o': output = str(value) # (see #673) elif opt == '-i': input = str(value).split(',') elif opt in ('-1', '-2'): outs[int(opt[1])] = None if outs: if int(h) != 0: raise ValueError("Cannot redirect remote std stream") for fd in outs: tmpfd, tmpfile = mkstemp() try: pyq.q(r'\%d %s' % (fd, tmpfile)) finally: os.unlink(tmpfile) os.close(tmpfd) r = None for script in preload: h(pyq.kp(r"\l " + script)) if input is not None: for chunk in logical_lines(cell): func = "{[%s]%s}" % (';'.join(input), chunk) args = tuple(_ns[i] for i in input) if r != Q_NONE: r.show() r = h((pyq.kp(func),) + args) if outs: _forward_outputs(outs) else: for chunk in logical_lines(cell): if r != Q_NONE: r.show() r = h(pyq.kp(chunk)) if outs: _forward_outputs(outs) except pyq.kerr as e: print("'%s" % e) else: if output is not None: if output.startswith('q.'): pyq.q('@[`.;;:;]', output[2:], r) else: _ns[output] = r else: if r != Q_NONE: return r
[ "def", "q", "(", "line", ",", "cell", "=", "None", ",", "_ns", "=", "None", ")", ":", "if", "cell", "is", "None", ":", "return", "pyq", ".", "q", "(", "line", ")", "if", "_ns", "is", "None", ":", "_ns", "=", "vars", "(", "sys", ".", "modules", "[", "'__main__'", "]", ")", "input", "=", "output", "=", "None", "preload", "=", "[", "]", "outs", "=", "{", "}", "try", ":", "h", "=", "pyq", ".", "q", "(", "'0i'", ")", "if", "line", ":", "for", "opt", ",", "value", "in", "getopt", "(", "line", ".", "split", "(", ")", ",", "\"h:l:o:i:12\"", ")", "[", "0", "]", ":", "if", "opt", "==", "'-l'", ":", "preload", ".", "append", "(", "value", ")", "elif", "opt", "==", "'-h'", ":", "h", "=", "pyq", ".", "K", "(", "str", "(", "':'", "+", "value", ")", ")", "elif", "opt", "==", "'-o'", ":", "output", "=", "str", "(", "value", ")", "# (see #673)", "elif", "opt", "==", "'-i'", ":", "input", "=", "str", "(", "value", ")", ".", "split", "(", "','", ")", "elif", "opt", "in", "(", "'-1'", ",", "'-2'", ")", ":", "outs", "[", "int", "(", "opt", "[", "1", "]", ")", "]", "=", "None", "if", "outs", ":", "if", "int", "(", "h", ")", "!=", "0", ":", "raise", "ValueError", "(", "\"Cannot redirect remote std stream\"", ")", "for", "fd", "in", "outs", ":", "tmpfd", ",", "tmpfile", "=", "mkstemp", "(", ")", "try", ":", "pyq", ".", "q", "(", "r'\\%d %s'", "%", "(", "fd", ",", "tmpfile", ")", ")", "finally", ":", "os", ".", "unlink", "(", "tmpfile", ")", "os", ".", "close", "(", "tmpfd", ")", "r", "=", "None", "for", "script", "in", "preload", ":", "h", "(", "pyq", ".", "kp", "(", "r\"\\l \"", "+", "script", ")", ")", "if", "input", "is", "not", "None", ":", "for", "chunk", "in", "logical_lines", "(", "cell", ")", ":", "func", "=", "\"{[%s]%s}\"", "%", "(", "';'", ".", "join", "(", "input", ")", ",", "chunk", ")", "args", "=", "tuple", "(", "_ns", "[", "i", "]", "for", "i", "in", "input", ")", "if", "r", "!=", "Q_NONE", ":", "r", ".", "show", "(", ")", "r", "=", "h", "(", "(", "pyq", ".", "kp", "(", "func", ")", ",", ")", "+", "args", ")", "if", "outs", ":", "_forward_outputs", "(", "outs", ")", "else", ":", "for", "chunk", "in", "logical_lines", "(", "cell", ")", ":", "if", "r", "!=", "Q_NONE", ":", "r", ".", "show", "(", ")", "r", "=", "h", "(", "pyq", ".", "kp", "(", "chunk", ")", ")", "if", "outs", ":", "_forward_outputs", "(", "outs", ")", "except", "pyq", ".", "kerr", "as", "e", ":", "print", "(", "\"'%s\"", "%", "e", ")", "else", ":", "if", "output", "is", "not", "None", ":", "if", "output", ".", "startswith", "(", "'q.'", ")", ":", "pyq", ".", "q", "(", "'@[`.;;:;]'", ",", "output", "[", "2", ":", "]", ",", "r", ")", "else", ":", "_ns", "[", "output", "]", "=", "r", "else", ":", "if", "r", "!=", "Q_NONE", ":", "return", "r" ]
Run q code. Options: -l (dir|script) - pre-load database or script -h host:port - execute on the given host -o var - send output to a variable named var. -i var1,..,varN - input variables -1/-2 - redirect stdout/stderr
[ "Run", "q", "code", "." ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/magic.py#L50-L121
train
KxSystems/pyq
src/pyq/magic.py
load_ipython_extension
def load_ipython_extension(ipython): """Register %q and %%q magics and pretty display for K objects""" ipython.register_magic_function(q, 'line_cell') fmr = ipython.display_formatter.formatters['text/plain'] fmr.for_type(pyq.K, _q_formatter)
python
def load_ipython_extension(ipython): """Register %q and %%q magics and pretty display for K objects""" ipython.register_magic_function(q, 'line_cell') fmr = ipython.display_formatter.formatters['text/plain'] fmr.for_type(pyq.K, _q_formatter)
[ "def", "load_ipython_extension", "(", "ipython", ")", ":", "ipython", ".", "register_magic_function", "(", "q", ",", "'line_cell'", ")", "fmr", "=", "ipython", ".", "display_formatter", ".", "formatters", "[", "'text/plain'", "]", "fmr", ".", "for_type", "(", "pyq", ".", "K", ",", "_q_formatter", ")" ]
Register %q and %%q magics and pretty display for K objects
[ "Register", "%q", "and", "%%q", "magics", "and", "pretty", "display", "for", "K", "objects" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/magic.py#L129-L133
train
KxSystems/pyq
src/pyq/ptk.py
get_prompt_tokens
def get_prompt_tokens(_): """Return a list of tokens for the prompt""" namespace = q(r'\d') if namespace == '.': namespace = '' return [(Token.Generic.Prompt, 'q%s)' % namespace)]
python
def get_prompt_tokens(_): """Return a list of tokens for the prompt""" namespace = q(r'\d') if namespace == '.': namespace = '' return [(Token.Generic.Prompt, 'q%s)' % namespace)]
[ "def", "get_prompt_tokens", "(", "_", ")", ":", "namespace", "=", "q", "(", "r'\\d'", ")", "if", "namespace", "==", "'.'", ":", "namespace", "=", "''", "return", "[", "(", "Token", ".", "Generic", ".", "Prompt", ",", "'q%s)'", "%", "namespace", ")", "]" ]
Return a list of tokens for the prompt
[ "Return", "a", "list", "of", "tokens", "for", "the", "prompt" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/ptk.py#L48-L53
train
KxSystems/pyq
src/pyq/ptk.py
cmdloop
def cmdloop(self, intro=None): """A Cmd.cmdloop implementation""" style = style_from_pygments(BasicStyle, style_dict) self.preloop() stop = None while not stop: line = prompt(get_prompt_tokens=get_prompt_tokens, lexer=lexer, get_bottom_toolbar_tokens=get_bottom_toolbar_tokens, history=history, style=style, true_color=True, on_exit='return-none', on_abort='return-none', completer=QCompleter()) if line is None or line.strip() == r'\\': raise SystemExit else: line = self.precmd(line) stop = self.onecmd(line) stop = self.postcmd(stop, line) self.postloop()
python
def cmdloop(self, intro=None): """A Cmd.cmdloop implementation""" style = style_from_pygments(BasicStyle, style_dict) self.preloop() stop = None while not stop: line = prompt(get_prompt_tokens=get_prompt_tokens, lexer=lexer, get_bottom_toolbar_tokens=get_bottom_toolbar_tokens, history=history, style=style, true_color=True, on_exit='return-none', on_abort='return-none', completer=QCompleter()) if line is None or line.strip() == r'\\': raise SystemExit else: line = self.precmd(line) stop = self.onecmd(line) stop = self.postcmd(stop, line) self.postloop()
[ "def", "cmdloop", "(", "self", ",", "intro", "=", "None", ")", ":", "style", "=", "style_from_pygments", "(", "BasicStyle", ",", "style_dict", ")", "self", ".", "preloop", "(", ")", "stop", "=", "None", "while", "not", "stop", ":", "line", "=", "prompt", "(", "get_prompt_tokens", "=", "get_prompt_tokens", ",", "lexer", "=", "lexer", ",", "get_bottom_toolbar_tokens", "=", "get_bottom_toolbar_tokens", ",", "history", "=", "history", ",", "style", "=", "style", ",", "true_color", "=", "True", ",", "on_exit", "=", "'return-none'", ",", "on_abort", "=", "'return-none'", ",", "completer", "=", "QCompleter", "(", ")", ")", "if", "line", "is", "None", "or", "line", ".", "strip", "(", ")", "==", "r'\\\\'", ":", "raise", "SystemExit", "else", ":", "line", "=", "self", ".", "precmd", "(", "line", ")", "stop", "=", "self", ".", "onecmd", "(", "line", ")", "stop", "=", "self", ".", "postcmd", "(", "stop", ",", "line", ")", "self", ".", "postloop", "(", ")" ]
A Cmd.cmdloop implementation
[ "A", "Cmd", ".", "cmdloop", "implementation" ]
ad7b807abde94615a7344aaa930bb01fb1552cc5
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/ptk.py#L90-L107
train
mapbox/snuggs
snuggs/__init__.py
eval
def eval(source, kwd_dict=None, **kwds): """Evaluate a snuggs expression. Parameters ---------- source : str Expression source. kwd_dict : dict A dict of items that form the evaluation context. Deprecated. kwds : dict A dict of items that form the evaluation context. Returns ------- object """ kwd_dict = kwd_dict or kwds with ctx(kwd_dict): return handleLine(source)
python
def eval(source, kwd_dict=None, **kwds): """Evaluate a snuggs expression. Parameters ---------- source : str Expression source. kwd_dict : dict A dict of items that form the evaluation context. Deprecated. kwds : dict A dict of items that form the evaluation context. Returns ------- object """ kwd_dict = kwd_dict or kwds with ctx(kwd_dict): return handleLine(source)
[ "def", "eval", "(", "source", ",", "kwd_dict", "=", "None", ",", "*", "*", "kwds", ")", ":", "kwd_dict", "=", "kwd_dict", "or", "kwds", "with", "ctx", "(", "kwd_dict", ")", ":", "return", "handleLine", "(", "source", ")" ]
Evaluate a snuggs expression. Parameters ---------- source : str Expression source. kwd_dict : dict A dict of items that form the evaluation context. Deprecated. kwds : dict A dict of items that form the evaluation context. Returns ------- object
[ "Evaluate", "a", "snuggs", "expression", "." ]
7517839178accf78ae9624b7186d03b77f837e02
https://github.com/mapbox/snuggs/blob/7517839178accf78ae9624b7186d03b77f837e02/snuggs/__init__.py#L208-L227
train
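A usage sketch for eval; it assumes snuggs is installed and that names in the expression resolve from the keyword context, per the examples in the project README.

    import snuggs

    snuggs.eval('(+ 1 2)')              # 3
    snuggs.eval('(* a b)', a=2, b=5)    # names resolve from the keyword context -> 10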
josiahcarlson/parse-crontab
crontab/_crontab.py
CronTab._make_matchers
def _make_matchers(self, crontab): ''' This constructs the full matcher struct. ''' crontab = _aliases.get(crontab, crontab) ct = crontab.split() if len(ct) == 5: ct.insert(0, '0') ct.append('*') elif len(ct) == 6: ct.insert(0, '0') _assert(len(ct) == 7, "improper number of cron entries specified; got %i need 5 to 7"%(len(ct,))) matchers = [_Matcher(which, entry) for which, entry in enumerate(ct)] return Matcher(*matchers)
python
def _make_matchers(self, crontab): ''' This constructs the full matcher struct. ''' crontab = _aliases.get(crontab, crontab) ct = crontab.split() if len(ct) == 5: ct.insert(0, '0') ct.append('*') elif len(ct) == 6: ct.insert(0, '0') _assert(len(ct) == 7, "improper number of cron entries specified; got %i need 5 to 7"%(len(ct,))) matchers = [_Matcher(which, entry) for which, entry in enumerate(ct)] return Matcher(*matchers)
[ "def", "_make_matchers", "(", "self", ",", "crontab", ")", ":", "crontab", "=", "_aliases", ".", "get", "(", "crontab", ",", "crontab", ")", "ct", "=", "crontab", ".", "split", "(", ")", "if", "len", "(", "ct", ")", "==", "5", ":", "ct", ".", "insert", "(", "0", ",", "'0'", ")", "ct", ".", "append", "(", "'*'", ")", "elif", "len", "(", "ct", ")", "==", "6", ":", "ct", ".", "insert", "(", "0", ",", "'0'", ")", "_assert", "(", "len", "(", "ct", ")", "==", "7", ",", "\"improper number of cron entries specified; got %i need 5 to 7\"", "%", "(", "len", "(", "ct", ",", ")", ")", ")", "matchers", "=", "[", "_Matcher", "(", "which", ",", "entry", ")", "for", "which", ",", "entry", "in", "enumerate", "(", "ct", ")", "]", "return", "Matcher", "(", "*", "matchers", ")" ]
This constructs the full matcher struct.
[ "This", "constructs", "the", "full", "matcher", "struct", "." ]
b2bd254cf14e8c83e502615851b0d4b62f73ab15
https://github.com/josiahcarlson/parse-crontab/blob/b2bd254cf14e8c83e502615851b0d4b62f73ab15/crontab/_crontab.py#L361-L377
train
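The field normalisation can be traced by hand; this standalone sketch mirrors the padding applied to a classic 5-field entry (the entry itself is illustrative).

    # Mirrors _make_matchers' padding for a 5-field entry.
    ct = '30 4 * * 1'.split()   # minute hour day month weekday
    if len(ct) == 5:
        ct.insert(0, '0')       # implicit seconds field
        ct.append('*')          # implicit year field
    print(ct)                   # ['0', '30', '4', '*', '*', '1', '*']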
josiahcarlson/parse-crontab
crontab/_crontab.py
CronTab.next
def next(self, now=None, increments=_increments, delta=True, default_utc=WARN_CHANGE): ''' How long to wait in seconds before this crontab entry can next be executed. ''' if default_utc is WARN_CHANGE and (isinstance(now, _number_types) or (now and not now.tzinfo) or now is None): warnings.warn(WARNING_CHANGE_MESSAGE, FutureWarning, 2) default_utc = False now = now or (datetime.utcnow() if default_utc and default_utc is not WARN_CHANGE else datetime.now()) if isinstance(now, _number_types): now = datetime.utcfromtimestamp(now) if default_utc else datetime.fromtimestamp(now) # handle timezones if the datetime object has a timezone and get a # reasonable future/past start time onow, now = now, now.replace(tzinfo=None) tz = onow.tzinfo future = now.replace(microsecond=0) + increments[0]() if future < now: # we are going backwards... _test = lambda: future.year < self.matchers.year if now.microsecond: future = now.replace(microsecond=0) else: # we are going forwards _test = lambda: self.matchers.year < future.year # Start from the year and work our way down. Any time we increment a # higher-magnitude value, we reset all lower-magnitude values. This # gets us performance without sacrificing correctness. Still more # complicated than a brute-force approach, but also orders of # magnitude faster in basically all cases. to_test = ENTRIES - 1 while to_test >= 0: if not self._test_match(to_test, future): inc = increments[to_test](future, self.matchers) future += inc for i in xrange(0, to_test): future = increments[ENTRIES+i](future, inc) try: if _test(): return None except: print(future, type(future), type(inc)) raise to_test = ENTRIES-1 continue to_test -= 1 # verify the match match = [self._test_match(i, future) for i in xrange(ENTRIES)] _assert(all(match), "\nYou have discovered a bug with crontab, please notify the\n" \ "author with the following information:\n" \ "crontab: %r\n" \ "now: %r", ' '.join(m.input for m in self.matchers), now) if not delta: onow = now = datetime(1970, 1, 1) delay = future - now if tz: delay += _fix_none(onow.utcoffset()) if hasattr(tz, 'localize'): delay -= _fix_none(tz.localize(future).utcoffset()) else: delay -= _fix_none(future.replace(tzinfo=tz).utcoffset()) return delay.days * 86400 + delay.seconds + delay.microseconds / 1000000.
python
def next(self, now=None, increments=_increments, delta=True, default_utc=WARN_CHANGE): ''' How long to wait in seconds before this crontab entry can next be executed. ''' if default_utc is WARN_CHANGE and (isinstance(now, _number_types) or (now and not now.tzinfo) or now is None): warnings.warn(WARNING_CHANGE_MESSAGE, FutureWarning, 2) default_utc = False now = now or (datetime.utcnow() if default_utc and default_utc is not WARN_CHANGE else datetime.now()) if isinstance(now, _number_types): now = datetime.utcfromtimestamp(now) if default_utc else datetime.fromtimestamp(now) # handle timezones if the datetime object has a timezone and get a # reasonable future/past start time onow, now = now, now.replace(tzinfo=None) tz = onow.tzinfo future = now.replace(microsecond=0) + increments[0]() if future < now: # we are going backwards... _test = lambda: future.year < self.matchers.year if now.microsecond: future = now.replace(microsecond=0) else: # we are going forwards _test = lambda: self.matchers.year < future.year # Start from the year and work our way down. Any time we increment a # higher-magnitude value, we reset all lower-magnitude values. This # gets us performance without sacrificing correctness. Still more # complicated than a brute-force approach, but also orders of # magnitude faster in basically all cases. to_test = ENTRIES - 1 while to_test >= 0: if not self._test_match(to_test, future): inc = increments[to_test](future, self.matchers) future += inc for i in xrange(0, to_test): future = increments[ENTRIES+i](future, inc) try: if _test(): return None except: print(future, type(future), type(inc)) raise to_test = ENTRIES-1 continue to_test -= 1 # verify the match match = [self._test_match(i, future) for i in xrange(ENTRIES)] _assert(all(match), "\nYou have discovered a bug with crontab, please notify the\n" \ "author with the following information:\n" \ "crontab: %r\n" \ "now: %r", ' '.join(m.input for m in self.matchers), now) if not delta: onow = now = datetime(1970, 1, 1) delay = future - now if tz: delay += _fix_none(onow.utcoffset()) if hasattr(tz, 'localize'): delay -= _fix_none(tz.localize(future).utcoffset()) else: delay -= _fix_none(future.replace(tzinfo=tz).utcoffset()) return delay.days * 86400 + delay.seconds + delay.microseconds / 1000000.
[ "def", "next", "(", "self", ",", "now", "=", "None", ",", "increments", "=", "_increments", ",", "delta", "=", "True", ",", "default_utc", "=", "WARN_CHANGE", ")", ":", "if", "default_utc", "is", "WARN_CHANGE", "and", "(", "isinstance", "(", "now", ",", "_number_types", ")", "or", "(", "now", "and", "not", "now", ".", "tzinfo", ")", "or", "now", "is", "None", ")", ":", "warnings", ".", "warn", "(", "WARNING_CHANGE_MESSAGE", ",", "FutureWarning", ",", "2", ")", "default_utc", "=", "False", "now", "=", "now", "or", "(", "datetime", ".", "utcnow", "(", ")", "if", "default_utc", "and", "default_utc", "is", "not", "WARN_CHANGE", "else", "datetime", ".", "now", "(", ")", ")", "if", "isinstance", "(", "now", ",", "_number_types", ")", ":", "now", "=", "datetime", ".", "utcfromtimestamp", "(", "now", ")", "if", "default_utc", "else", "datetime", ".", "fromtimestamp", "(", "now", ")", "# handle timezones if the datetime object has a timezone and get a", "# reasonable future/past start time", "onow", ",", "now", "=", "now", ",", "now", ".", "replace", "(", "tzinfo", "=", "None", ")", "tz", "=", "onow", ".", "tzinfo", "future", "=", "now", ".", "replace", "(", "microsecond", "=", "0", ")", "+", "increments", "[", "0", "]", "(", ")", "if", "future", "<", "now", ":", "# we are going backwards...", "_test", "=", "lambda", ":", "future", ".", "year", "<", "self", ".", "matchers", ".", "year", "if", "now", ".", "microsecond", ":", "future", "=", "now", ".", "replace", "(", "microsecond", "=", "0", ")", "else", ":", "# we are going forwards", "_test", "=", "lambda", ":", "self", ".", "matchers", ".", "year", "<", "future", ".", "year", "# Start from the year and work our way down. Any time we increment a", "# higher-magnitude value, we reset all lower-magnitude values. This", "# gets us performance without sacrificing correctness. 
Still more", "# complicated than a brute-force approach, but also orders of", "# magnitude faster in basically all cases.", "to_test", "=", "ENTRIES", "-", "1", "while", "to_test", ">=", "0", ":", "if", "not", "self", ".", "_test_match", "(", "to_test", ",", "future", ")", ":", "inc", "=", "increments", "[", "to_test", "]", "(", "future", ",", "self", ".", "matchers", ")", "future", "+=", "inc", "for", "i", "in", "xrange", "(", "0", ",", "to_test", ")", ":", "future", "=", "increments", "[", "ENTRIES", "+", "i", "]", "(", "future", ",", "inc", ")", "try", ":", "if", "_test", "(", ")", ":", "return", "None", "except", ":", "print", "(", "future", ",", "type", "(", "future", ")", ",", "type", "(", "inc", ")", ")", "raise", "to_test", "=", "ENTRIES", "-", "1", "continue", "to_test", "-=", "1", "# verify the match", "match", "=", "[", "self", ".", "_test_match", "(", "i", ",", "future", ")", "for", "i", "in", "xrange", "(", "ENTRIES", ")", "]", "_assert", "(", "all", "(", "match", ")", ",", "\"\\nYou have discovered a bug with crontab, please notify the\\n\"", "\"author with the following information:\\n\"", "\"crontab: %r\\n\"", "\"now: %r\"", ",", "' '", ".", "join", "(", "m", ".", "input", "for", "m", "in", "self", ".", "matchers", ")", ",", "now", ")", "if", "not", "delta", ":", "onow", "=", "now", "=", "datetime", "(", "1970", ",", "1", ",", "1", ")", "delay", "=", "future", "-", "now", "if", "tz", ":", "delay", "+=", "_fix_none", "(", "onow", ".", "utcoffset", "(", ")", ")", "if", "hasattr", "(", "tz", ",", "'localize'", ")", ":", "delay", "-=", "_fix_none", "(", "tz", ".", "localize", "(", "future", ")", ".", "utcoffset", "(", ")", ")", "else", ":", "delay", "-=", "_fix_none", "(", "future", ".", "replace", "(", "tzinfo", "=", "tz", ")", ".", "utcoffset", "(", ")", ")", "return", "delay", ".", "days", "*", "86400", "+", "delay", ".", "seconds", "+", "delay", ".", "microseconds", "/", "1000000." ]
How long to wait in seconds before this crontab entry can next be executed.
[ "How", "long", "to", "wait", "in", "seconds", "before", "this", "crontab", "entry", "can", "next", "be", "executed", "." ]
b2bd254cf14e8c83e502615851b0d4b62f73ab15
https://github.com/josiahcarlson/parse-crontab/blob/b2bd254cf14e8c83e502615851b0d4b62f73ab15/crontab/_crontab.py#L390-L458
train
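End-to-end usage of the package, assuming parse-crontab is installed; the returned delay naturally depends on when the call runs.

    from crontab import CronTab

    entry = CronTab('0 12 * * *')              # noon, every day
    delay = entry.next(default_utc=True)       # float seconds until the next noon, UTC
    stamp = entry.next(default_utc=True, delta=False)  # epoch seconds of that run instead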
sanand0/xmljson
xmljson/__init__.py
XMLData._tostring
def _tostring(value): '''Convert value to XML compatible string''' if value is True: value = 'true' elif value is False: value = 'false' elif value is None: value = '' return unicode(value)
python
def _tostring(value): '''Convert value to XML compatible string''' if value is True: value = 'true' elif value is False: value = 'false' elif value is None: value = '' return unicode(value)
[ "def", "_tostring", "(", "value", ")", ":", "if", "value", "is", "True", ":", "value", "=", "'true'", "elif", "value", "is", "False", ":", "value", "=", "'false'", "elif", "value", "is", "None", ":", "value", "=", "''", "return", "unicode", "(", "value", ")" ]
Convert value to XML compatible string
[ "Convert", "value", "to", "XML", "compatible", "string" ]
2ecc2065fe7c87b3d282d362289927f13ce7f8b0
https://github.com/sanand0/xmljson/blob/2ecc2065fe7c87b3d282d362289927f13ce7f8b0/xmljson/__init__.py#L61-L69
train
sanand0/xmljson
xmljson/__init__.py
XMLData._fromstring
def _fromstring(value): '''Convert XML string value to None, boolean, int or float''' # NOTE: Is this even possible ? if value is None: return None # FIXME: In XML, booleans are either 0/false or 1/true (lower-case !) if value.lower() == 'true': return True elif value.lower() == 'false': return False # FIXME: Using int() or float() is eating whitespaces unintendedly here try: return int(value) except ValueError: pass try: # Test for infinity and NaN values if float('-inf') < float(value) < float('inf'): return float(value) except ValueError: pass return value
python
def _fromstring(value): '''Convert XML string value to None, boolean, int or float''' # NOTE: Is this even possible ? if value is None: return None # FIXME: In XML, booleans are either 0/false or 1/true (lower-case !) if value.lower() == 'true': return True elif value.lower() == 'false': return False # FIXME: Using int() or float() is eating whitespaces unintendedly here try: return int(value) except ValueError: pass try: # Test for infinity and NaN values if float('-inf') < float(value) < float('inf'): return float(value) except ValueError: pass return value
[ "def", "_fromstring", "(", "value", ")", ":", "# NOTE: Is this even possible ?", "if", "value", "is", "None", ":", "return", "None", "# FIXME: In XML, booleans are either 0/false or 1/true (lower-case !)", "if", "value", ".", "lower", "(", ")", "==", "'true'", ":", "return", "True", "elif", "value", ".", "lower", "(", ")", "==", "'false'", ":", "return", "False", "# FIXME: Using int() or float() is eating whitespaces unintendedly here", "try", ":", "return", "int", "(", "value", ")", "except", "ValueError", ":", "pass", "try", ":", "# Test for infinity and NaN values", "if", "float", "(", "'-inf'", ")", "<", "float", "(", "value", ")", "<", "float", "(", "'inf'", ")", ":", "return", "float", "(", "value", ")", "except", "ValueError", ":", "pass", "return", "value" ]
Convert XML string value to None, boolean, int or float
[ "Convert", "XML", "string", "value", "to", "None", "boolean", "int", "or", "float" ]
2ecc2065fe7c87b3d282d362289927f13ce7f8b0
https://github.com/sanand0/xmljson/blob/2ecc2065fe7c87b3d282d362289927f13ce7f8b0/xmljson/__init__.py#L72-L97
train
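Tracing the coercion branches above on a few inputs, assuming xmljson is installed; note that infinity deliberately stays a string because the strict < comparison excludes it.

    from xmljson import XMLData

    # _fromstring is a staticmethod, so it can be exercised directly.
    for raw in ('true', 'False', '42', '3.14', 'inf', 'x1'):
        print(repr(raw), '->', repr(XMLData._fromstring(raw)))
    # 'true'  -> True
    # 'False' -> False
    # '42'    -> 42
    # '3.14'  -> 3.14
    # 'inf'   -> 'inf'  (float('inf') < float('inf') is False)
    # 'x1'    -> 'x1'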
pysal/esda
esda/join_counts.py
Join_Counts.by_col
def by_col(cls, df, cols, w=None, inplace=False, pvalue='sim', outvals=None, **stat_kws): """ Function to compute a Join_Count statistic on a dataframe Arguments --------- df : pandas.DataFrame a pandas dataframe with a geometry column cols : string or list of string name or list of names of columns to use to compute the statistic w : pysal weights object a weights object aligned with the dataframe. If not provided, this is searched for in the dataframe's metadata inplace : bool a boolean denoting whether to operate on the dataframe inplace or to return a series containing the results of the computation. If operating inplace, the derived columns will be named 'column_join_count' pvalue : string a string denoting which pvalue should be returned. Refer to the Join_Count statistic's documentation for available p-values outvals : list of strings list of arbitrary attributes to return as columns from the Join_Count statistic **stat_kws : keyword arguments options to pass to the underlying statistic. For this, see the documentation for the Join_Count statistic. Returns -------- If inplace, None, and operation is conducted on dataframe in memory. Otherwise, returns a copy of the dataframe with the relevant columns attached. See Also --------- For further documentation, refer to the Join_Count class in pysal.esda """ if outvals is None: outvals = [] outvals.extend(['bb', 'p_sim_bw', 'p_sim_bb']) pvalue = '' return _univariate_handler(df, cols, w=w, inplace=inplace, pvalue=pvalue, outvals=outvals, stat=cls, swapname='bw', **stat_kws)
python
def by_col(cls, df, cols, w=None, inplace=False, pvalue='sim', outvals=None, **stat_kws): """ Function to compute a Join_Count statistic on a dataframe Arguments --------- df : pandas.DataFrame a pandas dataframe with a geometry column cols : string or list of string name or list of names of columns to use to compute the statistic w : pysal weights object a weights object aligned with the dataframe. If not provided, this is searched for in the dataframe's metadata inplace : bool a boolean denoting whether to operate on the dataframe inplace or to return a series containing the results of the computation. If operating inplace, the derived columns will be named 'column_join_count' pvalue : string a string denoting which pvalue should be returned. Refer to the Join_Count statistic's documentation for available p-values outvals : list of strings list of arbitrary attributes to return as columns from the Join_Count statistic **stat_kws : keyword arguments options to pass to the underlying statistic. For this, see the documentation for the Join_Count statistic. Returns -------- If inplace, None, and operation is conducted on dataframe in memory. Otherwise, returns a copy of the dataframe with the relevant columns attached. See Also --------- For further documentation, refer to the Join_Count class in pysal.esda """ if outvals is None: outvals = [] outvals.extend(['bb', 'p_sim_bw', 'p_sim_bb']) pvalue = '' return _univariate_handler(df, cols, w=w, inplace=inplace, pvalue=pvalue, outvals=outvals, stat=cls, swapname='bw', **stat_kws)
[ "def", "by_col", "(", "cls", ",", "df", ",", "cols", ",", "w", "=", "None", ",", "inplace", "=", "False", ",", "pvalue", "=", "'sim'", ",", "outvals", "=", "None", ",", "*", "*", "stat_kws", ")", ":", "if", "outvals", "is", "None", ":", "outvals", "=", "[", "]", "outvals", ".", "extend", "(", "[", "'bb'", ",", "'p_sim_bw'", ",", "'p_sim_bb'", "]", ")", "pvalue", "=", "''", "return", "_univariate_handler", "(", "df", ",", "cols", ",", "w", "=", "w", ",", "inplace", "=", "inplace", ",", "pvalue", "=", "pvalue", ",", "outvals", "=", "outvals", ",", "stat", "=", "cls", ",", "swapname", "=", "'bw'", ",", "*", "*", "stat_kws", ")" ]
Function to compute a Join_Count statistic on a dataframe

        Arguments
        ---------
        df          :   pandas.DataFrame
                        a pandas dataframe with a geometry column
        cols        :   string or list of string
                        name or list of names of columns to use to compute
                        the statistic
        w           :   pysal weights object
                        a weights object aligned with the dataframe. If not
                        provided, this is searched for in the dataframe's
                        metadata
        inplace     :   bool
                        a boolean denoting whether to operate on the dataframe
                        inplace or to return a series containing the results
                        of the computation. If operating inplace, the derived
                        columns will be named 'column_join_count'
        pvalue      :   string
                        a string denoting which pvalue should be returned.
                        Refer to the Join_Count statistic's documentation for
                        available p-values. Note: this argument is currently
                        overridden internally; the simulated p-values
                        p_sim_bw and p_sim_bb are always returned via outvals.
        outvals     :   list of strings
                        list of arbitrary attributes to return as columns from
                        the Join_Count statistic
        **stat_kws  :   keyword arguments
                        options to pass to the underlying statistic. For this,
                        see the documentation for the Join_Count statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory.
        Otherwise, returns a copy of the dataframe with the relevant columns
        attached.

        See Also
        ---------
        For further documentation, refer to the Join_Count class in pysal.esda
[ "Function", "to", "compute", "a", "Join_Count", "statistic", "on", "a", "dataframe" ]
2fafc6ec505e153152a86601d3e0fba080610c20
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/join_counts.py#L171-L214
train
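A minimal usage sketch for Join_Count.by_col above (the columbus sample data, Queen weights, and the derived 'BINARY' column are assumptions for illustration):

>>> import libpysal
>>> import geopandas as gpd
>>> from esda.join_counts import Join_Count
>>> df = gpd.read_file(libpysal.examples.get_path('columbus.shp'))
>>> w = libpysal.weights.Queen.from_dataframe(df)
>>> df['BINARY'] = (df['HOVAL'] > df['HOVAL'].median()).astype(int)  # join counts need a binary variable
>>> out = Join_Count.by_col(df, 'BINARY', w=w, permutations=99)

Given swapname='bw' and the extended outvals, the returned copy should carry columns along the lines of BINARY_bw, BINARY_bb, BINARY_p_sim_bw, and BINARY_p_sim_bb; the names are inferred from the handler's naming scheme rather than verified output.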
pysal/esda
esda/moran.py
Moran_BV_matrix
def Moran_BV_matrix(variables, w, permutations=0, varnames=None):
    """
    Bivariate Moran Matrix

    Calculates bivariate Moran between all pairs of a set of variables.

    Parameters
    ----------
    variables    : array or pandas.DataFrame
                   sequence of variables to be assessed
    w            : W
                   a spatial weights object
    permutations : int
                   number of permutations
    varnames     : list, optional if variables is an array
                   Strings for variable names. Will add an attribute to `Moran_BV`
                   objects in results needed for plotting in `splot` or `.plot()`.
                   Default = None.
                   Note: If variables is a `pandas.DataFrame`, varnames
                   will automatically be generated
    Returns
    -------
    results      : dictionary
                   (i, j) is the key for the pair of variables, values are
                   the Moran_BV objects.

    Examples
    --------

    open dbf

    >>> import libpysal
    >>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))

    pull off selected variables from dbf and create numpy arrays for each

    >>> varnames = ['SIDR74',  'SIDR79',  'NWR74',  'NWR79']
    >>> vars = [np.array(f.by_col[var]) for var in varnames]

    create a contiguity matrix from an external gal file

    >>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()

    create an instance of Moran_BV_matrix

    >>> from esda.moran import Moran_BV_matrix
    >>> res = Moran_BV_matrix(vars,  w,  varnames = varnames)

    check values

    >>> round(res[(0,  1)].I,7)
    0.1936261
    >>> round(res[(3,  0)].I,7)
    0.3770138
    """
    try:
        # check if pandas is installed
        import pandas
        if isinstance(variables, pandas.DataFrame):
            # if yes use variables as df and convert to numpy_array
            varnames = pandas.Index.tolist(variables.columns)
            variables_n = []
            for var in varnames:
                variables_n.append(variables[str(var)].values)
        else:
            variables_n = variables
    except ImportError:
        variables_n = variables

    results = _Moran_BV_Matrix_array(variables=variables_n, w=w,
                                     permutations=permutations,
                                     varnames=varnames)
    return results
python
def Moran_BV_matrix(variables, w, permutations=0, varnames=None):
    """
    Bivariate Moran Matrix

    Calculates bivariate Moran between all pairs of a set of variables.

    Parameters
    ----------
    variables    : array or pandas.DataFrame
                   sequence of variables to be assessed
    w            : W
                   a spatial weights object
    permutations : int
                   number of permutations
    varnames     : list, optional if variables is an array
                   Strings for variable names. Will add an attribute to `Moran_BV`
                   objects in results needed for plotting in `splot` or `.plot()`.
                   Default = None.
                   Note: If variables is a `pandas.DataFrame`, varnames
                   will automatically be generated
    Returns
    -------
    results      : dictionary
                   (i, j) is the key for the pair of variables, values are
                   the Moran_BV objects.

    Examples
    --------

    open dbf

    >>> import libpysal
    >>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))

    pull off selected variables from dbf and create numpy arrays for each

    >>> varnames = ['SIDR74',  'SIDR79',  'NWR74',  'NWR79']
    >>> vars = [np.array(f.by_col[var]) for var in varnames]

    create a contiguity matrix from an external gal file

    >>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()

    create an instance of Moran_BV_matrix

    >>> from esda.moran import Moran_BV_matrix
    >>> res = Moran_BV_matrix(vars,  w,  varnames = varnames)

    check values

    >>> round(res[(0,  1)].I,7)
    0.1936261
    >>> round(res[(3,  0)].I,7)
    0.3770138
    """
    try:
        # check if pandas is installed
        import pandas
        if isinstance(variables, pandas.DataFrame):
            # if yes use variables as df and convert to numpy_array
            varnames = pandas.Index.tolist(variables.columns)
            variables_n = []
            for var in varnames:
                variables_n.append(variables[str(var)].values)
        else:
            variables_n = variables
    except ImportError:
        variables_n = variables

    results = _Moran_BV_Matrix_array(variables=variables_n, w=w,
                                     permutations=permutations,
                                     varnames=varnames)
    return results
[ "def", "Moran_BV_matrix", "(", "variables", ",", "w", ",", "permutations", "=", "0", ",", "varnames", "=", "None", ")", ":", "try", ":", "# check if pandas is installed", "import", "pandas", "if", "isinstance", "(", "variables", ",", "pandas", ".", "DataFrame", ")", ":", "# if yes use variables as df and convert to numpy_array", "varnames", "=", "pandas", ".", "Index", ".", "tolist", "(", "variables", ".", "columns", ")", "variables_n", "=", "[", "]", "for", "var", "in", "varnames", ":", "variables_n", ".", "append", "(", "variables", "[", "str", "(", "var", ")", "]", ".", "values", ")", "else", ":", "variables_n", "=", "variables", "except", "ImportError", ":", "variables_n", "=", "variables", "results", "=", "_Moran_BV_Matrix_array", "(", "variables", "=", "variables_n", ",", "w", "=", "w", ",", "permutations", "=", "permutations", ",", "varnames", "=", "varnames", ")", "return", "results" ]
Bivariate Moran Matrix

    Calculates bivariate Moran between all pairs of a set of variables.

    Parameters
    ----------
    variables    : array or pandas.DataFrame
                   sequence of variables to be assessed
    w            : W
                   a spatial weights object
    permutations : int
                   number of permutations
    varnames     : list, optional if variables is an array
                   Strings for variable names. Will add an attribute to `Moran_BV`
                   objects in results needed for plotting in `splot` or `.plot()`.
                   Default = None.
                   Note: If variables is a `pandas.DataFrame`, varnames
                   will automatically be generated
    Returns
    -------
    results      : dictionary
                   (i, j) is the key for the pair of variables, values are
                   the Moran_BV objects.

    Examples
    --------

    open dbf

    >>> import libpysal
    >>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))

    pull off selected variables from dbf and create numpy arrays for each

    >>> varnames = ['SIDR74',  'SIDR79',  'NWR74',  'NWR79']
    >>> vars = [np.array(f.by_col[var]) for var in varnames]

    create a contiguity matrix from an external gal file

    >>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()

    create an instance of Moran_BV_matrix

    >>> from esda.moran import Moran_BV_matrix
    >>> res = Moran_BV_matrix(vars,  w,  varnames = varnames)

    check values

    >>> round(res[(0,  1)].I,7)
    0.1936261
    >>> round(res[(3,  0)].I,7)
    0.3770138
[ "Bivariate", "Moran", "Matrix" ]
2fafc6ec505e153152a86601d3e0fba080610c20
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/moran.py#L464-L537
train
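A sketch of the pandas.DataFrame branch above, assuming the same sids2 sample data is available; varnames is then inferred from the columns rather than passed explicitly:

>>> import numpy as np
>>> import pandas as pd
>>> import libpysal
>>> from esda.moran import Moran_BV_matrix
>>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
>>> names = ['SIDR74', 'SIDR79', 'NWR74', 'NWR79']
>>> df = pd.DataFrame({v: np.array(f.by_col[v]) for v in names})
>>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()
>>> res = Moran_BV_matrix(df[names], w)
>>> res[(0, 1)].varnames
{'x': 'SIDR74', 'y': 'SIDR79'}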
pysal/esda
esda/moran.py
_Moran_BV_Matrix_array
def _Moran_BV_Matrix_array(variables, w, permutations=0, varnames=None):
    """
    Base calculation for MORAN_BV_Matrix
    """
    k = len(variables)  # must be computed before the varnames default below uses it
    if varnames is None:
        varnames = ['x{}'.format(i) for i in range(k)]
    rk = list(range(0, k - 1))
    results = {}
    for i in rk:
        for j in range(i + 1, k):
            y1 = variables[i]
            y2 = variables[j]
            results[i, j] = Moran_BV(y1, y2, w, permutations=permutations)
            results[j, i] = Moran_BV(y2, y1, w, permutations=permutations)
            results[i, j].varnames = {'x': varnames[i], 'y': varnames[j]}
            results[j, i].varnames = {'x': varnames[j], 'y': varnames[i]}
    return results
python
def _Moran_BV_Matrix_array(variables, w, permutations=0, varnames=None):
    """
    Base calculation for MORAN_BV_Matrix
    """
    k = len(variables)  # must be computed before the varnames default below uses it
    if varnames is None:
        varnames = ['x{}'.format(i) for i in range(k)]
    rk = list(range(0, k - 1))
    results = {}
    for i in rk:
        for j in range(i + 1, k):
            y1 = variables[i]
            y2 = variables[j]
            results[i, j] = Moran_BV(y1, y2, w, permutations=permutations)
            results[j, i] = Moran_BV(y2, y1, w, permutations=permutations)
            results[i, j].varnames = {'x': varnames[i], 'y': varnames[j]}
            results[j, i].varnames = {'x': varnames[j], 'y': varnames[i]}
    return results
[ "def", "_Moran_BV_Matrix_array", "(", "variables", ",", "w", ",", "permutations", "=", "0", ",", "varnames", "=", "None", ")", ":", "if", "varnames", "is", "None", ":", "varnames", "=", "[", "'x{}'", ".", "format", "(", "i", ")", "for", "i", "in", "range", "(", "k", ")", "]", "k", "=", "len", "(", "variables", ")", "rk", "=", "list", "(", "range", "(", "0", ",", "k", "-", "1", ")", ")", "results", "=", "{", "}", "for", "i", "in", "rk", ":", "for", "j", "in", "range", "(", "i", "+", "1", ",", "k", ")", ":", "y1", "=", "variables", "[", "i", "]", "y2", "=", "variables", "[", "j", "]", "results", "[", "i", ",", "j", "]", "=", "Moran_BV", "(", "y1", ",", "y2", ",", "w", ",", "permutations", "=", "permutations", ")", "results", "[", "j", ",", "i", "]", "=", "Moran_BV", "(", "y2", ",", "y1", ",", "w", ",", "permutations", "=", "permutations", ")", "results", "[", "i", ",", "j", "]", ".", "varnames", "=", "{", "'x'", ":", "varnames", "[", "i", "]", ",", "'y'", ":", "varnames", "[", "j", "]", "}", "results", "[", "j", ",", "i", "]", ".", "varnames", "=", "{", "'x'", ":", "varnames", "[", "j", "]", ",", "'y'", ":", "varnames", "[", "i", "]", "}", "return", "results" ]
Base calculation for MORAN_BV_Matrix
[ "Base", "calculation", "for", "MORAN_BV_Matrix" ]
2fafc6ec505e153152a86601d3e0fba080610c20
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/moran.py#L540-L558
train
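A small sketch of how the helper keys its results (the random lattice data is an assumption for illustration): each unordered pair produces both an (i, j) and a (j, i) entry with x and y swapped, and the default names x0, x1, ... are generated when varnames is omitted.

>>> import numpy as np
>>> import libpysal
>>> from esda.moran import _Moran_BV_Matrix_array
>>> w = libpysal.weights.lat2W(4, 4)
>>> variables = [np.random.random(16) for _ in range(3)]
>>> res = _Moran_BV_Matrix_array(variables, w)
>>> sorted(res.keys())
[(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
>>> res[(0, 1)].varnames
{'x': 'x0', 'y': 'x1'}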
pysal/esda
esda/moran.py
Moran_BV.by_col
def by_col(cls, df, x, y=None, w=None, inplace=False, pvalue='sim',
               outvals=None, **stat_kws):
        """
        Function to compute a Moran_BV statistic on a dataframe

        Arguments
        ---------
        df          :   pandas.DataFrame
                        a pandas dataframe with a geometry column
        x           :   string or list of strings
                        column name or list of column names to use as X values
                        to compute the bivariate statistic. If no y is
                        provided, pairwise comparisons among these variates
                        are used instead.
        y           :   string or list of strings
                        column name or list of column names to use as Y values
                        to compute the bivariate statistic. If no y is
                        provided, pairwise comparisons among the x variates
                        are used instead.
        w           :   pysal weights object
                        a weights object aligned with the dataframe. If not
                        provided, this is searched for in the dataframe's
                        metadata
        inplace     :   bool
                        a boolean denoting whether to operate on the dataframe
                        inplace or to return a series containing the results
                        of the computation. If operating inplace, the derived
                        columns will be named 'column_moran_bv'
        pvalue      :   string
                        a string denoting which pvalue should be returned.
                        Refer to the Moran_BV statistic's documentation for
                        available p-values
        outvals     :   list of strings
                        list of arbitrary attributes to return as columns from
                        the Moran_BV statistic
        **stat_kws  :   keyword arguments
                        options to pass to the underlying statistic. For this,
                        see the documentation for the Moran_BV statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory.
        Otherwise, returns a copy of the dataframe with the relevant columns
        attached.

        See Also
        ---------
        For further documentation, refer to the Moran_BV class in pysal.esda
        """
        return _bivariate_handler(df, x, y=y, w=w, inplace=inplace,
                                  pvalue=pvalue, outvals=outvals,
                                  swapname=cls.__name__.lower(),
                                  stat=cls, **stat_kws)
python
def by_col(cls, df, x, y=None, w=None, inplace=False, pvalue='sim',
               outvals=None, **stat_kws):
        """
        Function to compute a Moran_BV statistic on a dataframe

        Arguments
        ---------
        df          :   pandas.DataFrame
                        a pandas dataframe with a geometry column
        x           :   string or list of strings
                        column name or list of column names to use as X values
                        to compute the bivariate statistic. If no y is
                        provided, pairwise comparisons among these variates
                        are used instead.
        y           :   string or list of strings
                        column name or list of column names to use as Y values
                        to compute the bivariate statistic. If no y is
                        provided, pairwise comparisons among the x variates
                        are used instead.
        w           :   pysal weights object
                        a weights object aligned with the dataframe. If not
                        provided, this is searched for in the dataframe's
                        metadata
        inplace     :   bool
                        a boolean denoting whether to operate on the dataframe
                        inplace or to return a series containing the results
                        of the computation. If operating inplace, the derived
                        columns will be named 'column_moran_bv'
        pvalue      :   string
                        a string denoting which pvalue should be returned.
                        Refer to the Moran_BV statistic's documentation for
                        available p-values
        outvals     :   list of strings
                        list of arbitrary attributes to return as columns from
                        the Moran_BV statistic
        **stat_kws  :   keyword arguments
                        options to pass to the underlying statistic. For this,
                        see the documentation for the Moran_BV statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory.
        Otherwise, returns a copy of the dataframe with the relevant columns
        attached.

        See Also
        ---------
        For further documentation, refer to the Moran_BV class in pysal.esda
        """
        return _bivariate_handler(df, x, y=y, w=w, inplace=inplace,
                                  pvalue=pvalue, outvals=outvals,
                                  swapname=cls.__name__.lower(),
                                  stat=cls, **stat_kws)
[ "def", "by_col", "(", "cls", ",", "df", ",", "x", ",", "y", "=", "None", ",", "w", "=", "None", ",", "inplace", "=", "False", ",", "pvalue", "=", "'sim'", ",", "outvals", "=", "None", ",", "*", "*", "stat_kws", ")", ":", "return", "_bivariate_handler", "(", "df", ",", "x", ",", "y", "=", "y", ",", "w", "=", "w", ",", "inplace", "=", "inplace", ",", "pvalue", "=", "pvalue", ",", "outvals", "=", "outvals", ",", "swapname", "=", "cls", ".", "__name__", ".", "lower", "(", ")", ",", "stat", "=", "cls", ",", "*", "*", "stat_kws", ")" ]
Function to compute a Moran_BV statistic on a dataframe

        Arguments
        ---------
        df          :   pandas.DataFrame
                        a pandas dataframe with a geometry column
        x           :   string or list of strings
                        column name or list of column names to use as X values
                        to compute the bivariate statistic. If no y is
                        provided, pairwise comparisons among these variates
                        are used instead.
        y           :   string or list of strings
                        column name or list of column names to use as Y values
                        to compute the bivariate statistic. If no y is
                        provided, pairwise comparisons among the x variates
                        are used instead.
        w           :   pysal weights object
                        a weights object aligned with the dataframe. If not
                        provided, this is searched for in the dataframe's
                        metadata
        inplace     :   bool
                        a boolean denoting whether to operate on the dataframe
                        inplace or to return a series containing the results
                        of the computation. If operating inplace, the derived
                        columns will be named 'column_moran_bv'
        pvalue      :   string
                        a string denoting which pvalue should be returned.
                        Refer to the Moran_BV statistic's documentation for
                        available p-values
        outvals     :   list of strings
                        list of arbitrary attributes to return as columns from
                        the Moran_BV statistic
        **stat_kws  :   keyword arguments
                        options to pass to the underlying statistic. For this,
                        see the documentation for the Moran_BV statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory.
        Otherwise, returns a copy of the dataframe with the relevant columns
        attached.

        See Also
        ---------
        For further documentation, refer to the Moran_BV class in pysal.esda
[ "Function", "to", "compute", "a", "Moran_BV", "statistic", "on", "a", "dataframe" ]
2fafc6ec505e153152a86601d3e0fba080610c20
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/moran.py#L415-L461
train
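A minimal usage sketch for Moran_BV.by_col (the columbus data, Queen weights, and the HOVAL/CRIME pairing are illustrative assumptions):

>>> import libpysal
>>> import geopandas as gpd
>>> from esda.moran import Moran_BV
>>> df = gpd.read_file(libpysal.examples.get_path('columbus.shp'))
>>> w = libpysal.weights.Queen.from_dataframe(df)
>>> out = Moran_BV.by_col(df, ['HOVAL'], y=['CRIME'], w=w, permutations=99)

The derived columns should follow a 'HOVAL-CRIME_moran_bv' / 'HOVAL-CRIME_p_sim' pattern, inferred from the handler's naming scheme rather than verified here.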
pysal/esda
esda/moran.py
Moran_Rate.by_col
def by_col(cls, df, events, populations, w=None, inplace=False,
               pvalue='sim', outvals=None, swapname='', **stat_kws):
        """
        Function to compute a Moran_Rate statistic on a dataframe

        Arguments
        ---------
        df          :   pandas.DataFrame
                        a pandas dataframe with a geometry column
        events      :   string or list of strings
                        one or more names where events are stored
        populations :   string or list of strings
                        one or more names where the populations corresponding
                        to the events are stored. If one population column is
                        provided, it is used for all event columns. If more
                        than one population column is provided but there is
                        not a population for every event column, an exception
                        will be raised.
        w           :   pysal weights object
                        a weights object aligned with the dataframe. If not
                        provided, this is searched for in the dataframe's
                        metadata
        inplace     :   bool
                        a boolean denoting whether to operate on the dataframe
                        inplace or to return a series containing the results
                        of the computation. If operating inplace, the derived
                        columns will be named 'column_moran_rate'
        pvalue      :   string
                        a string denoting which pvalue should be returned.
                        Refer to the Moran_Rate statistic's documentation for
                        available p-values
        outvals     :   list of strings
                        list of arbitrary attributes to return as columns from
                        the Moran_Rate statistic
        swapname    :   string
                        suffix to use in the derived column names; if empty,
                        the lowercased class name is used
        **stat_kws  :   keyword arguments
                        options to pass to the underlying statistic. For this,
                        see the documentation for the Moran_Rate statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory.
        Otherwise, returns a copy of the dataframe with the relevant columns
        attached.

        See Also
        ---------
        For further documentation, refer to the Moran_Rate class in pysal.esda
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, events, populations, w=w, inplace=True,
                       pvalue=pvalue, outvals=outvals, swapname=swapname,
                       **stat_kws)
            return new
        if isinstance(events, str):
            events = [events]
        if isinstance(populations, str):
            populations = [populations]
        if len(populations) < len(events):
            populations = populations * len(events)
        if len(events) != len(populations):
            raise ValueError('There is not a one-to-one matching between events and '
                             'populations!\nEvents: {}\n\nPopulations:'
                             ' {}'.format(events, populations))
        adjusted = stat_kws.pop('adjusted', True)

        if isinstance(adjusted, bool):
            adjusted = [adjusted] * len(events)
        if swapname == '':
            swapname = cls.__name__.lower()

        rates = [assuncao_rate(df[e], df[pop]) if adj
                 else df[e].astype(float) / df[pop]
                 for e, pop, adj in zip(events, populations, adjusted)]
        names = ['-'.join((e, p)) for e, p in zip(events, populations)]
        out_df = df.copy()
        rate_df = out_df[[]].copy() #empty frame with the same index; avoids importing pandas
        for name, rate in zip(names, rates):
            rate_df[name] = rate
        stat_df = _univariate_handler(rate_df, names, w=w, inplace=False,
                                      pvalue=pvalue, outvals=outvals,
                                      swapname=swapname,
                                      stat=Moran, #how would this get done w/super?
                                      **stat_kws)
        for col in stat_df.columns:
            df[col] = stat_df[col]
python
def by_col(cls, df, events, populations, w=None, inplace=False,
               pvalue='sim', outvals=None, swapname='', **stat_kws):
        """
        Function to compute a Moran_Rate statistic on a dataframe

        Arguments
        ---------
        df          :   pandas.DataFrame
                        a pandas dataframe with a geometry column
        events      :   string or list of strings
                        one or more names where events are stored
        populations :   string or list of strings
                        one or more names where the populations corresponding
                        to the events are stored. If one population column is
                        provided, it is used for all event columns. If more
                        than one population column is provided but there is
                        not a population for every event column, an exception
                        will be raised.
        w           :   pysal weights object
                        a weights object aligned with the dataframe. If not
                        provided, this is searched for in the dataframe's
                        metadata
        inplace     :   bool
                        a boolean denoting whether to operate on the dataframe
                        inplace or to return a series containing the results
                        of the computation. If operating inplace, the derived
                        columns will be named 'column_moran_rate'
        pvalue      :   string
                        a string denoting which pvalue should be returned.
                        Refer to the Moran_Rate statistic's documentation for
                        available p-values
        outvals     :   list of strings
                        list of arbitrary attributes to return as columns from
                        the Moran_Rate statistic
        swapname    :   string
                        suffix to use in the derived column names; if empty,
                        the lowercased class name is used
        **stat_kws  :   keyword arguments
                        options to pass to the underlying statistic. For this,
                        see the documentation for the Moran_Rate statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory.
        Otherwise, returns a copy of the dataframe with the relevant columns
        attached.

        See Also
        ---------
        For further documentation, refer to the Moran_Rate class in pysal.esda
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, events, populations, w=w, inplace=True,
                       pvalue=pvalue, outvals=outvals, swapname=swapname,
                       **stat_kws)
            return new
        if isinstance(events, str):
            events = [events]
        if isinstance(populations, str):
            populations = [populations]
        if len(populations) < len(events):
            populations = populations * len(events)
        if len(events) != len(populations):
            raise ValueError('There is not a one-to-one matching between events and '
                             'populations!\nEvents: {}\n\nPopulations:'
                             ' {}'.format(events, populations))
        adjusted = stat_kws.pop('adjusted', True)

        if isinstance(adjusted, bool):
            adjusted = [adjusted] * len(events)
        if swapname == '':
            swapname = cls.__name__.lower()

        rates = [assuncao_rate(df[e], df[pop]) if adj
                 else df[e].astype(float) / df[pop]
                 for e, pop, adj in zip(events, populations, adjusted)]
        names = ['-'.join((e, p)) for e, p in zip(events, populations)]
        out_df = df.copy()
        rate_df = out_df[[]].copy() #empty frame with the same index; avoids importing pandas
        for name, rate in zip(names, rates):
            rate_df[name] = rate
        stat_df = _univariate_handler(rate_df, names, w=w, inplace=False,
                                      pvalue=pvalue, outvals=outvals,
                                      swapname=swapname,
                                      stat=Moran, #how would this get done w/super?
                                      **stat_kws)
        for col in stat_df.columns:
            df[col] = stat_df[col]
[ "def", "by_col", "(", "cls", ",", "df", ",", "events", ",", "populations", ",", "w", "=", "None", ",", "inplace", "=", "False", ",", "pvalue", "=", "'sim'", ",", "outvals", "=", "None", ",", "swapname", "=", "''", ",", "*", "*", "stat_kws", ")", ":", "if", "not", "inplace", ":", "new", "=", "df", ".", "copy", "(", ")", "cls", ".", "by_col", "(", "new", ",", "events", ",", "populations", ",", "w", "=", "w", ",", "inplace", "=", "True", ",", "pvalue", "=", "pvalue", ",", "outvals", "=", "outvals", ",", "swapname", "=", "swapname", ",", "*", "*", "stat_kws", ")", "return", "new", "if", "isinstance", "(", "events", ",", "str", ")", ":", "events", "=", "[", "events", "]", "if", "isinstance", "(", "populations", ",", "str", ")", ":", "populations", "=", "[", "populations", "]", "if", "len", "(", "populations", ")", "<", "len", "(", "events", ")", ":", "populations", "=", "populations", "*", "len", "(", "events", ")", "if", "len", "(", "events", ")", "!=", "len", "(", "populations", ")", ":", "raise", "ValueError", "(", "'There is not a one-to-one matching between events and '", "'populations!\\nEvents: {}\\n\\nPopulations:'", "' {}'", ".", "format", "(", "events", ",", "populations", ")", ")", "adjusted", "=", "stat_kws", ".", "pop", "(", "'adjusted'", ",", "True", ")", "if", "isinstance", "(", "adjusted", ",", "bool", ")", ":", "adjusted", "=", "[", "adjusted", "]", "*", "len", "(", "events", ")", "if", "swapname", "is", "''", ":", "swapname", "=", "cls", ".", "__name__", ".", "lower", "(", ")", "rates", "=", "[", "assuncao_rate", "(", "df", "[", "e", "]", ",", "df", "[", "pop", "]", ")", "if", "adj", "else", "df", "[", "e", "]", ".", "astype", "(", "float", ")", "/", "df", "[", "pop", "]", "for", "e", ",", "pop", ",", "adj", "in", "zip", "(", "events", ",", "populations", ",", "adjusted", ")", "]", "names", "=", "[", "'-'", ".", "join", "(", "(", "e", ",", "p", ")", ")", "for", "e", ",", "p", "in", "zip", "(", "events", ",", "populations", ")", "]", "out_df", "=", "df", ".", "copy", "(", ")", "rate_df", "=", "out_df", ".", "from_items", "(", "list", "(", "zip", "(", "names", ",", "rates", ")", ")", ")", "#trick to avoid importing pandas", "stat_df", "=", "_univariate_handler", "(", "rate_df", ",", "names", ",", "w", "=", "w", ",", "inplace", "=", "False", ",", "pvalue", "=", "pvalue", ",", "outvals", "=", "outvals", ",", "swapname", "=", "swapname", ",", "stat", "=", "Moran", ",", "#how would this get done w/super?", "*", "*", "stat_kws", ")", "for", "col", "in", "stat_df", ".", "columns", ":", "df", "[", "col", "]", "=", "stat_df", "[", "col", "]" ]
Function to compute a Moran_Rate statistic on a dataframe

        Arguments
        ---------
        df          :   pandas.DataFrame
                        a pandas dataframe with a geometry column
        events      :   string or list of strings
                        one or more names where events are stored
        populations :   string or list of strings
                        one or more names where the populations corresponding
                        to the events are stored. If one population column is
                        provided, it is used for all event columns. If more
                        than one population column is provided but there is
                        not a population for every event column, an exception
                        will be raised.
        w           :   pysal weights object
                        a weights object aligned with the dataframe. If not
                        provided, this is searched for in the dataframe's
                        metadata
        inplace     :   bool
                        a boolean denoting whether to operate on the dataframe
                        inplace or to return a series containing the results
                        of the computation. If operating inplace, the derived
                        columns will be named 'column_moran_rate'
        pvalue      :   string
                        a string denoting which pvalue should be returned.
                        Refer to the Moran_Rate statistic's documentation for
                        available p-values
        outvals     :   list of strings
                        list of arbitrary attributes to return as columns from
                        the Moran_Rate statistic
        swapname    :   string
                        suffix to use in the derived column names; if empty,
                        the lowercased class name is used
        **stat_kws  :   keyword arguments
                        options to pass to the underlying statistic. For this,
                        see the documentation for the Moran_Rate statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory.
        Otherwise, returns a copy of the dataframe with the relevant columns
        attached.

        See Also
        ---------
        For further documentation, refer to the Moran_Rate class in pysal.esda
[ "Function", "to", "compute", "a", "Moran_Rate", "statistic", "on", "a", "dataframe" ]
2fafc6ec505e153152a86601d3e0fba080610c20
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/moran.py#L679-L758
train
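A minimal usage sketch for Moran_Rate.by_col (the sids2 data and the SID79/BIR79 event-population pairing are illustrative assumptions):

>>> import libpysal
>>> import geopandas as gpd
>>> from esda.moran import Moran_Rate
>>> df = gpd.read_file(libpysal.examples.get_path('sids2.shp'))
>>> w = libpysal.io.open(libpysal.examples.get_path('sids2.gal')).read()
>>> out = Moran_Rate.by_col(df, ['SID79'], ['BIR79'], w=w, permutations=99)

By default adjusted=True, so each rate is Assuncao-smoothed before Moran's I is computed; the returned copy should carry columns named along the lines of 'SID79-BIR79_moran_rate' and 'SID79-BIR79_p_sim'.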
pysal/esda
esda/smoothing.py
flatten
def flatten(l, unique=True): """flatten a list of lists Parameters ---------- l : list of lists unique : boolean whether or not only unique items are wanted (default=True) Returns ------- list of single items Examples -------- Creating a sample list whose elements are lists of integers >>> l = [[1, 2], [3, 4, ], [5, 6]] Applying flatten function >>> flatten(l) [1, 2, 3, 4, 5, 6] """ l = reduce(lambda x, y: x + y, l) if not unique: return list(l) return list(set(l))
python
def flatten(l, unique=True): """flatten a list of lists Parameters ---------- l : list of lists unique : boolean whether or not only unique items are wanted (default=True) Returns ------- list of single items Examples -------- Creating a sample list whose elements are lists of integers >>> l = [[1, 2], [3, 4, ], [5, 6]] Applying flatten function >>> flatten(l) [1, 2, 3, 4, 5, 6] """ l = reduce(lambda x, y: x + y, l) if not unique: return list(l) return list(set(l))
[ "def", "flatten", "(", "l", ",", "unique", "=", "True", ")", ":", "l", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "+", "y", ",", "l", ")", "if", "not", "unique", ":", "return", "list", "(", "l", ")", "return", "list", "(", "set", "(", "l", ")", ")" ]
flatten a list of lists Parameters ---------- l : list of lists unique : boolean whether or not only unique items are wanted (default=True) Returns ------- list of single items Examples -------- Creating a sample list whose elements are lists of integers >>> l = [[1, 2], [3, 4, ], [5, 6]] Applying flatten function >>> flatten(l) [1, 2, 3, 4, 5, 6]
[ "flatten", "a", "list", "of", "lists" ]
2fafc6ec505e153152a86601d3e0fba080610c20
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L32-L63
train
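The docstring example only exercises the default; with duplicates present, unique=False preserves repeats, while the unique result comes back from set() without a guaranteed order:

>>> flatten([[1, 2], [2, 3]], unique=False)
[1, 2, 2, 3]
>>> sorted(flatten([[1, 2], [2, 3]]))
[1, 2, 3]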
pysal/esda
esda/smoothing.py
weighted_median
def weighted_median(d, w): """A utility function to find a median of d based on w Parameters ---------- d : array (n, 1), variable for which median will be found w : array (n, 1), variable on which d's median will be decided Notes ----- d and w are arranged in the same order Returns ------- float median of d Examples -------- Creating an array including five integers. We will get the median of these integers. >>> d = np.array([5,4,3,1,2]) Creating another array including weight values for the above integers. The median of d will be decided with a consideration to these weight values. >>> w = np.array([10, 22, 9, 2, 5]) Applying weighted_median function >>> weighted_median(d, w) 4 """ dtype = [('w', '%s' % w.dtype), ('v', '%s' % d.dtype)] d_w = np.array(list(zip(w, d)), dtype=dtype) d_w.sort(order='v') reordered_w = d_w['w'].cumsum() cumsum_threshold = reordered_w[-1] * 1.0 / 2 median_inx = (reordered_w >= cumsum_threshold).nonzero()[0][0] if reordered_w[median_inx] == cumsum_threshold and len(d) - 1 > median_inx: return np.sort(d)[median_inx:median_inx + 2].mean() return np.sort(d)[median_inx]
python
def weighted_median(d, w): """A utility function to find a median of d based on w Parameters ---------- d : array (n, 1), variable for which median will be found w : array (n, 1), variable on which d's median will be decided Notes ----- d and w are arranged in the same order Returns ------- float median of d Examples -------- Creating an array including five integers. We will get the median of these integers. >>> d = np.array([5,4,3,1,2]) Creating another array including weight values for the above integers. The median of d will be decided with a consideration to these weight values. >>> w = np.array([10, 22, 9, 2, 5]) Applying weighted_median function >>> weighted_median(d, w) 4 """ dtype = [('w', '%s' % w.dtype), ('v', '%s' % d.dtype)] d_w = np.array(list(zip(w, d)), dtype=dtype) d_w.sort(order='v') reordered_w = d_w['w'].cumsum() cumsum_threshold = reordered_w[-1] * 1.0 / 2 median_inx = (reordered_w >= cumsum_threshold).nonzero()[0][0] if reordered_w[median_inx] == cumsum_threshold and len(d) - 1 > median_inx: return np.sort(d)[median_inx:median_inx + 2].mean() return np.sort(d)[median_inx]
[ "def", "weighted_median", "(", "d", ",", "w", ")", ":", "dtype", "=", "[", "(", "'w'", ",", "'%s'", "%", "w", ".", "dtype", ")", ",", "(", "'v'", ",", "'%s'", "%", "d", ".", "dtype", ")", "]", "d_w", "=", "np", ".", "array", "(", "list", "(", "zip", "(", "w", ",", "d", ")", ")", ",", "dtype", "=", "dtype", ")", "d_w", ".", "sort", "(", "order", "=", "'v'", ")", "reordered_w", "=", "d_w", "[", "'w'", "]", ".", "cumsum", "(", ")", "cumsum_threshold", "=", "reordered_w", "[", "-", "1", "]", "*", "1.0", "/", "2", "median_inx", "=", "(", "reordered_w", ">=", "cumsum_threshold", ")", ".", "nonzero", "(", ")", "[", "0", "]", "[", "0", "]", "if", "reordered_w", "[", "median_inx", "]", "==", "cumsum_threshold", "and", "len", "(", "d", ")", "-", "1", ">", "median_inx", ":", "return", "np", ".", "sort", "(", "d", ")", "[", "median_inx", ":", "median_inx", "+", "2", "]", ".", "mean", "(", ")", "return", "np", ".", "sort", "(", "d", ")", "[", "median_inx", "]" ]
A utility function to find a median of d based on w Parameters ---------- d : array (n, 1), variable for which median will be found w : array (n, 1), variable on which d's median will be decided Notes ----- d and w are arranged in the same order Returns ------- float median of d Examples -------- Creating an array including five integers. We will get the median of these integers. >>> d = np.array([5,4,3,1,2]) Creating another array including weight values for the above integers. The median of d will be decided with a consideration to these weight values. >>> w = np.array([10, 22, 9, 2, 5]) Applying weighted_median function >>> weighted_median(d, w) 4
[ "A", "utility", "function", "to", "find", "a", "median", "of", "d", "based", "on", "w" ]
2fafc6ec505e153152a86601d3e0fba080610c20
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L66-L113
train
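A sketch of the tie-splitting branch above: when the cumulative weight lands exactly on half the total and a next value exists, the two middle values are averaged.

>>> import numpy as np
>>> weighted_median(np.array([1, 2, 3, 4]), np.array([1, 1, 1, 1]))
2.5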
pysal/esda
esda/smoothing.py
sum_by_n
def sum_by_n(d, w, n): """A utility function to summarize a data array into n values after weighting the array with another weight array w Parameters ---------- d : array (t, 1), numerical values w : array (t, 1), numerical values for weighting n : integer the number of groups t = c*n (c is a constant) Returns ------- : array (n, 1), an array with summarized values Examples -------- Creating an array including four integers. We will compute weighted means for every two elements. >>> d = np.array([10, 9, 20, 30]) Here is another array with the weight values for d's elements. >>> w = np.array([0.5, 0.1, 0.3, 0.8]) We specify the number of groups for which the weighted mean is computed. >>> n = 2 Applying sum_by_n function >>> sum_by_n(d, w, n) array([ 5.9, 30. ]) """ t = len(d) h = t // n #must be floor! d = d * w return np.array([sum(d[i: i + h]) for i in range(0, t, h)])
python
def sum_by_n(d, w, n): """A utility function to summarize a data array into n values after weighting the array with another weight array w Parameters ---------- d : array (t, 1), numerical values w : array (t, 1), numerical values for weighting n : integer the number of groups t = c*n (c is a constant) Returns ------- : array (n, 1), an array with summarized values Examples -------- Creating an array including four integers. We will compute weighted means for every two elements. >>> d = np.array([10, 9, 20, 30]) Here is another array with the weight values for d's elements. >>> w = np.array([0.5, 0.1, 0.3, 0.8]) We specify the number of groups for which the weighted mean is computed. >>> n = 2 Applying sum_by_n function >>> sum_by_n(d, w, n) array([ 5.9, 30. ]) """ t = len(d) h = t // n #must be floor! d = d * w return np.array([sum(d[i: i + h]) for i in range(0, t, h)])
[ "def", "sum_by_n", "(", "d", ",", "w", ",", "n", ")", ":", "t", "=", "len", "(", "d", ")", "h", "=", "t", "//", "n", "#must be floor!", "d", "=", "d", "*", "w", "return", "np", ".", "array", "(", "[", "sum", "(", "d", "[", "i", ":", "i", "+", "h", "]", ")", "for", "i", "in", "range", "(", "0", ",", "t", ",", "h", ")", "]", ")" ]
A utility function to summarize a data array into n values after weighting the array with another weight array w Parameters ---------- d : array (t, 1), numerical values w : array (t, 1), numerical values for weighting n : integer the number of groups t = c*n (c is a constant) Returns ------- : array (n, 1), an array with summarized values Examples -------- Creating an array including four integers. We will compute weighted means for every two elements. >>> d = np.array([10, 9, 20, 30]) Here is another array with the weight values for d's elements. >>> w = np.array([0.5, 0.1, 0.3, 0.8]) We specify the number of groups for which the weighted mean is computed. >>> n = 2 Applying sum_by_n function >>> sum_by_n(d, w, n) array([ 5.9, 30. ])
[ "A", "utility", "function", "to", "summarize", "a", "data", "array", "into", "n", "values", "after", "weighting", "the", "array", "with", "another", "weight", "array", "w" ]
2fafc6ec505e153152a86601d3e0fba080610c20
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L116-L160
train
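A toy illustration of why the t = c*n requirement in the docstring matters: when n does not divide len(d), h = t // n is floored and the function silently returns the wrong number of groups rather than raising.

>>> import numpy as np
>>> len(sum_by_n(np.arange(6), np.ones(6), 4))  # 6 is not a multiple of 4
6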
pysal/esda
esda/smoothing.py
crude_age_standardization
def crude_age_standardization(e, b, n): """A utility function to compute rate through crude age standardization Parameters ---------- e : array (n*h, 1), event variable measured for each age group across n spatial units b : array (n*h, 1), population at risk variable measured for each age group across n spatial units n : integer the number of spatial units Notes ----- e and b are arranged in the same order Returns ------- : array (n, 1), age standardized rate Examples -------- Creating an array of an event variable (e.g., the number of cancer patients) for 2 regions in each of which 4 age groups are available. The first 4 values are event values for 4 age groups in the region 1, and the next 4 values are for 4 age groups in the region 2. >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20]) Creating another array of a population-at-risk variable (e.g., total population) for the same two regions. The order for entering values is the same as the case of e. >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90]) Specifying the number of regions. >>> n = 2 Applying crude_age_standardization function to e and b >>> crude_age_standardization(e, b, n) array([0.2375 , 0.26666667]) """ r = e * 1.0 / b b_by_n = sum_by_n(b, 1.0, n) age_weight = b * 1.0 / b_by_n.repeat(len(e) // n) return sum_by_n(r, age_weight, n)
python
def crude_age_standardization(e, b, n): """A utility function to compute rate through crude age standardization Parameters ---------- e : array (n*h, 1), event variable measured for each age group across n spatial units b : array (n*h, 1), population at risk variable measured for each age group across n spatial units n : integer the number of spatial units Notes ----- e and b are arranged in the same order Returns ------- : array (n, 1), age standardized rate Examples -------- Creating an array of an event variable (e.g., the number of cancer patients) for 2 regions in each of which 4 age groups are available. The first 4 values are event values for 4 age groups in the region 1, and the next 4 values are for 4 age groups in the region 2. >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20]) Creating another array of a population-at-risk variable (e.g., total population) for the same two regions. The order for entering values is the same as the case of e. >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90]) Specifying the number of regions. >>> n = 2 Applying crude_age_standardization function to e and b >>> crude_age_standardization(e, b, n) array([0.2375 , 0.26666667]) """ r = e * 1.0 / b b_by_n = sum_by_n(b, 1.0, n) age_weight = b * 1.0 / b_by_n.repeat(len(e) // n) return sum_by_n(r, age_weight, n)
[ "def", "crude_age_standardization", "(", "e", ",", "b", ",", "n", ")", ":", "r", "=", "e", "*", "1.0", "/", "b", "b_by_n", "=", "sum_by_n", "(", "b", ",", "1.0", ",", "n", ")", "age_weight", "=", "b", "*", "1.0", "/", "b_by_n", ".", "repeat", "(", "len", "(", "e", ")", "//", "n", ")", "return", "sum_by_n", "(", "r", ",", "age_weight", ",", "n", ")" ]
A utility function to compute rate through crude age standardization Parameters ---------- e : array (n*h, 1), event variable measured for each age group across n spatial units b : array (n*h, 1), population at risk variable measured for each age group across n spatial units n : integer the number of spatial units Notes ----- e and b are arranged in the same order Returns ------- : array (n, 1), age standardized rate Examples -------- Creating an array of an event variable (e.g., the number of cancer patients) for 2 regions in each of which 4 age groups are available. The first 4 values are event values for 4 age groups in the region 1, and the next 4 values are for 4 age groups in the region 2. >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20]) Creating another array of a population-at-risk variable (e.g., total population) for the same two regions. The order for entering values is the same as the case of e. >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90]) Specifying the number of regions. >>> n = 2 Applying crude_age_standardization function to e and b >>> crude_age_standardization(e, b, n) array([0.2375 , 0.26666667])
[ "A", "utility", "function", "to", "compute", "rate", "through", "crude", "age", "standardization" ]
2fafc6ec505e153152a86601d3e0fba080610c20
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L163-L213
train
pysal/esda
esda/smoothing.py
direct_age_standardization
def direct_age_standardization(e, b, s, n, alpha=0.05):
    """A utility function to compute rate through direct age standardization

    Parameters
    ----------
    e          : array
                 (n*h, 1), event variable measured for each age group across n spatial units
    b          : array
                 (n*h, 1), population at risk variable measured for each age group across n spatial units
    s          : array
                 (n*h, 1), standard population for each age group across n spatial units
    n          : integer
                 the number of spatial units
    alpha      : float
                 significance level for confidence interval

    Notes
    -----
    e, b, and s are arranged in the same order

    Returns
    -------
    list
                 a list of n tuples; a tuple has a rate and its lower and upper limits
                 age standardized rates and confidence intervals

    Examples
    --------

    Creating an array of an event variable (e.g., the number of cancer patients)
    for 2 regions in each of which 4 age groups are available.
    The first 4 values are event values for 4 age groups in the region 1,
    and the next 4 values are for 4 age groups in the region 2.

    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])

    Creating another array of a population-at-risk variable (e.g., total population)
    for the same two regions.
    The order for entering values is the same as the case of e.

    >>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900])

    For direct age standardization, we also need the data for standard population.
    Standard population is a reference population-at-risk (e.g., population distribution for the U.S.)
    whose age distribution can be used as a benchmarking point for comparing
    age distributions across regions (e.g., population distribution for Arizona and California).
    Another array including standard population is created.

    >>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])

    Specifying the number of regions.

    >>> n = 2

    Applying direct_age_standardization function to e and b

    >>> a, b = [i[0] for i in direct_age_standardization(e, b, s, n)]
    >>> round(a, 4)
    0.0237
    >>> round(b, 4)
    0.0267
    """
    age_weight = (1.0 / b) * (s * 1.0 / sum_by_n(s, 1.0, n).repeat(len(s) // n))
    adjusted_r = sum_by_n(e, age_weight, n)
    var_estimate = sum_by_n(e, np.square(age_weight), n)
    g_a = np.square(adjusted_r) / var_estimate
    g_b = var_estimate / adjusted_r
    k = [age_weight[i:i + len(b) // n].max() for i in range(0, len(b), len(b) // n)]
    g_a_k = np.square(adjusted_r + k) / (var_estimate + np.square(k))
    g_b_k = (var_estimate + np.square(k)) / (adjusted_r + k)
    res = []
    for i in range(len(adjusted_r)):
        if adjusted_r[i] == 0:
            upper = 0.5 * chi2.ppf(1 - 0.5 * alpha, 2) #df=2 is an assumption: the exact Poisson upper limit for a zero count
            lower = 0.0
        else:
            lower = gamma.ppf(0.5 * alpha, g_a[i], scale=g_b[i])
            upper = gamma.ppf(1 - 0.5 * alpha, g_a_k[i], scale=g_b_k[i])
        res.append((adjusted_r[i], lower, upper))
    return res
python
def direct_age_standardization(e, b, s, n, alpha=0.05):
    """A utility function to compute rate through direct age standardization

    Parameters
    ----------
    e          : array
                 (n*h, 1), event variable measured for each age group across n spatial units
    b          : array
                 (n*h, 1), population at risk variable measured for each age group across n spatial units
    s          : array
                 (n*h, 1), standard population for each age group across n spatial units
    n          : integer
                 the number of spatial units
    alpha      : float
                 significance level for confidence interval

    Notes
    -----
    e, b, and s are arranged in the same order

    Returns
    -------
    list
                 a list of n tuples; a tuple has a rate and its lower and upper limits
                 age standardized rates and confidence intervals

    Examples
    --------

    Creating an array of an event variable (e.g., the number of cancer patients)
    for 2 regions in each of which 4 age groups are available.
    The first 4 values are event values for 4 age groups in the region 1,
    and the next 4 values are for 4 age groups in the region 2.

    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])

    Creating another array of a population-at-risk variable (e.g., total population)
    for the same two regions.
    The order for entering values is the same as the case of e.

    >>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900])

    For direct age standardization, we also need the data for standard population.
    Standard population is a reference population-at-risk (e.g., population distribution for the U.S.)
    whose age distribution can be used as a benchmarking point for comparing
    age distributions across regions (e.g., population distribution for Arizona and California).
    Another array including standard population is created.

    >>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])

    Specifying the number of regions.

    >>> n = 2

    Applying direct_age_standardization function to e and b

    >>> a, b = [i[0] for i in direct_age_standardization(e, b, s, n)]
    >>> round(a, 4)
    0.0237
    >>> round(b, 4)
    0.0267
    """
    age_weight = (1.0 / b) * (s * 1.0 / sum_by_n(s, 1.0, n).repeat(len(s) // n))
    adjusted_r = sum_by_n(e, age_weight, n)
    var_estimate = sum_by_n(e, np.square(age_weight), n)
    g_a = np.square(adjusted_r) / var_estimate
    g_b = var_estimate / adjusted_r
    k = [age_weight[i:i + len(b) // n].max() for i in range(0, len(b), len(b) // n)]
    g_a_k = np.square(adjusted_r + k) / (var_estimate + np.square(k))
    g_b_k = (var_estimate + np.square(k)) / (adjusted_r + k)
    res = []
    for i in range(len(adjusted_r)):
        if adjusted_r[i] == 0:
            upper = 0.5 * chi2.ppf(1 - 0.5 * alpha, 2) #df=2 is an assumption: the exact Poisson upper limit for a zero count
            lower = 0.0
        else:
            lower = gamma.ppf(0.5 * alpha, g_a[i], scale=g_b[i])
            upper = gamma.ppf(1 - 0.5 * alpha, g_a_k[i], scale=g_b_k[i])
        res.append((adjusted_r[i], lower, upper))
    return res
[ "def", "direct_age_standardization", "(", "e", ",", "b", ",", "s", ",", "n", ",", "alpha", "=", "0.05", ")", ":", "age_weight", "=", "(", "1.0", "/", "b", ")", "*", "(", "s", "*", "1.0", "/", "sum_by_n", "(", "s", ",", "1.0", ",", "n", ")", ".", "repeat", "(", "len", "(", "s", ")", "//", "n", ")", ")", "adjusted_r", "=", "sum_by_n", "(", "e", ",", "age_weight", ",", "n", ")", "var_estimate", "=", "sum_by_n", "(", "e", ",", "np", ".", "square", "(", "age_weight", ")", ",", "n", ")", "g_a", "=", "np", ".", "square", "(", "adjusted_r", ")", "/", "var_estimate", "g_b", "=", "var_estimate", "/", "adjusted_r", "k", "=", "[", "age_weight", "[", "i", ":", "i", "+", "len", "(", "b", ")", "//", "n", "]", ".", "max", "(", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "b", ")", ",", "len", "(", "b", ")", "//", "n", ")", "]", "g_a_k", "=", "np", ".", "square", "(", "adjusted_r", "+", "k", ")", "/", "(", "var_estimate", "+", "np", ".", "square", "(", "k", ")", ")", "g_b_k", "=", "(", "var_estimate", "+", "np", ".", "square", "(", "k", ")", ")", "/", "(", "adjusted_r", "+", "k", ")", "summed_b", "=", "sum_by_n", "(", "b", ",", "1.0", ",", "n", ")", "res", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "adjusted_r", ")", ")", ":", "if", "adjusted_r", "[", "i", "]", "==", "0", ":", "upper", "=", "0.5", "*", "chi2", ".", "ppf", "(", "1", "-", "0.5", "*", "alpha", ")", "lower", "=", "0.0", "else", ":", "lower", "=", "gamma", ".", "ppf", "(", "0.5", "*", "alpha", ",", "g_a", "[", "i", "]", ",", "scale", "=", "g_b", "[", "i", "]", ")", "upper", "=", "gamma", ".", "ppf", "(", "1", "-", "0.5", "*", "alpha", ",", "g_a_k", "[", "i", "]", ",", "scale", "=", "g_b_k", "[", "i", "]", ")", "res", ".", "append", "(", "(", "adjusted_r", "[", "i", "]", ",", "lower", ",", "upper", ")", ")", "return", "res" ]
A utility function to compute rate through direct age standardization Parameters ---------- e : array (n*h, 1), event variable measured for each age group across n spatial units b : array (n*h, 1), population at risk variable measured for each age group across n spatial units s : array (n*h, 1), standard population for each age group across n spatial units n : integer the number of spatial units alpha : float significance level for confidence interval Notes ----- e, b, and s are arranged in the same order Returns ------- list a list of n tuples; a tuple has a rate and its lower and upper limits age standardized rates and confidence intervals Examples -------- Creating an array of an event variable (e.g., the number of cancer patients) for 2 regions in each of which 4 age groups are available. The first 4 values are event values for 4 age groups in the region 1, and the next 4 values are for 4 age groups in the region 2. >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20]) Creating another array of a population-at-risk variable (e.g., total population) for the same two regions. The order for entering values is the same as the case of e. >>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900]) For direct age standardization, we also need the data for standard population. Standard population is a reference population-at-risk (e.g., population distribution for the U.S.) whose age distribution can be used as a benchmarking point for comparing age distributions across regions (e.g., population distribution for Arizona and California). Another array including standard population is created. >>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900]) Specifying the number of regions. >>> n = 2 Applying direct_age_standardization function to e and b >>> a, b = [i[0] for i in direct_age_standardization(e, b, s, n)] >>> round(a, 4) 0.0237 >>> round(b, 4) 0.0267
[ "A", "utility", "function", "to", "compute", "rate", "through", "direct", "age", "standardization" ]
2fafc6ec505e153152a86601d3e0fba080610c20
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L216-L298
train
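Each element of the result is a (rate, lower, upper) tuple; continuing with the e, b, s, and n from the docstring example, the confidence limits can be unpacked directly (numeric values not reproduced here):

>>> rate, lower, upper = direct_age_standardization(e, b, s, n)[0]
>>> bool(lower <= rate <= upper)
True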
pysal/esda
esda/smoothing.py
indirect_age_standardization
def indirect_age_standardization(e, b, s_e, s_b, n, alpha=0.05):
    """A utility function to compute rate through indirect age standardization

    Parameters
    ----------
    e          : array
                 (n*h, 1), event variable measured for each age group across n spatial units
    b          : array
                 (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e        : array
                 (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b        : array
                 (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n          : integer
                 the number of spatial units
    alpha      : float
                 significance level for confidence interval

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    list
                 a list of n tuples; a tuple has a rate and its lower and upper limits
                 age standardized rate

    Examples
    --------

    Creating an array of an event variable (e.g., the number of cancer patients)
    for 2 regions in each of which 4 age groups are available.
    The first 4 values are event values for 4 age groups in the region 1,
    and the next 4 values are for 4 age groups in the region 2.

    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])

    Creating another array of a population-at-risk variable (e.g., total population)
    for the same two regions.
    The order for entering values is the same as the case of e.

    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])

    For indirect age standardization, we also need the data for standard population and event.
    Standard population is a reference population-at-risk (e.g., population distribution for the U.S.)
    whose age distribution can be used as a benchmarking point for comparing
    age distributions across regions (e.g., population distribution for Arizona and California).
    When the same concept is applied to the event variable,
    we call it standard event (e.g., the number of cancer patients in the U.S.).
    Two additional arrays including standard population and event are created.

    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])

    Specifying the number of regions.

    >>> n = 2

    Applying indirect_age_standardization function to e and b

    >>> [i[0] for i in indirect_age_standardization(e, b, s_e, s_b, n)]
    [0.23723821989528798, 0.2610803324099723]
    """
    smr = standardized_mortality_ratio(e, b, s_e, s_b, n)
    s_r_all = sum(s_e * 1.0) / sum(s_b * 1.0)
    adjusted_r = s_r_all * smr

    e_by_n = sum_by_n(e, 1.0, n)
    log_smr = np.log(smr)
    log_smr_sd = 1.0 / np.sqrt(e_by_n)
    norm_thres = norm.ppf(1 - 0.5 * alpha)
    log_smr_lower = log_smr - norm_thres * log_smr_sd
    log_smr_upper = log_smr + norm_thres * log_smr_sd
    smr_lower = np.exp(log_smr_lower) * s_r_all
    smr_upper = np.exp(log_smr_upper) * s_r_all
    res = list(zip(adjusted_r, smr_lower, smr_upper))

    return res
python
def indirect_age_standardization(e, b, s_e, s_b, n, alpha=0.05):
    """A utility function to compute rate through indirect age standardization

    Parameters
    ----------
    e          : array
                 (n*h, 1), event variable measured for each age group across n spatial units
    b          : array
                 (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e        : array
                 (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b        : array
                 (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n          : integer
                 the number of spatial units
    alpha      : float
                 significance level for confidence interval

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    list
                 a list of n tuples; a tuple has a rate and its lower and upper limits
                 age standardized rate

    Examples
    --------

    Creating an array of an event variable (e.g., the number of cancer patients)
    for 2 regions in each of which 4 age groups are available.
    The first 4 values are event values for 4 age groups in the region 1,
    and the next 4 values are for 4 age groups in the region 2.

    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])

    Creating another array of a population-at-risk variable (e.g., total population)
    for the same two regions.
    The order for entering values is the same as the case of e.

    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])

    For indirect age standardization, we also need the data for standard population and event.
    Standard population is a reference population-at-risk (e.g., population distribution for the U.S.)
    whose age distribution can be used as a benchmarking point for comparing
    age distributions across regions (e.g., population distribution for Arizona and California).
    When the same concept is applied to the event variable,
    we call it standard event (e.g., the number of cancer patients in the U.S.).
    Two additional arrays including standard population and event are created.

    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])

    Specifying the number of regions.

    >>> n = 2

    Applying indirect_age_standardization function to e and b

    >>> [i[0] for i in indirect_age_standardization(e, b, s_e, s_b, n)]
    [0.23723821989528798, 0.2610803324099723]
    """
    smr = standardized_mortality_ratio(e, b, s_e, s_b, n)
    s_r_all = sum(s_e * 1.0) / sum(s_b * 1.0)
    adjusted_r = s_r_all * smr

    e_by_n = sum_by_n(e, 1.0, n)
    log_smr = np.log(smr)
    log_smr_sd = 1.0 / np.sqrt(e_by_n)
    norm_thres = norm.ppf(1 - 0.5 * alpha)
    log_smr_lower = log_smr - norm_thres * log_smr_sd
    log_smr_upper = log_smr + norm_thres * log_smr_sd
    smr_lower = np.exp(log_smr_lower) * s_r_all
    smr_upper = np.exp(log_smr_upper) * s_r_all
    res = list(zip(adjusted_r, smr_lower, smr_upper))

    return res
[ "def", "indirect_age_standardization", "(", "e", ",", "b", ",", "s_e", ",", "s_b", ",", "n", ",", "alpha", "=", "0.05", ")", ":", "smr", "=", "standardized_mortality_ratio", "(", "e", ",", "b", ",", "s_e", ",", "s_b", ",", "n", ")", "s_r_all", "=", "sum", "(", "s_e", "*", "1.0", ")", "/", "sum", "(", "s_b", "*", "1.0", ")", "adjusted_r", "=", "s_r_all", "*", "smr", "e_by_n", "=", "sum_by_n", "(", "e", ",", "1.0", ",", "n", ")", "log_smr", "=", "np", ".", "log", "(", "smr", ")", "log_smr_sd", "=", "1.0", "/", "np", ".", "sqrt", "(", "e_by_n", ")", "norm_thres", "=", "norm", ".", "ppf", "(", "1", "-", "0.5", "*", "alpha", ")", "log_smr_lower", "=", "log_smr", "-", "norm_thres", "*", "log_smr_sd", "log_smr_upper", "=", "log_smr", "+", "norm_thres", "*", "log_smr_sd", "smr_lower", "=", "np", ".", "exp", "(", "log_smr_lower", ")", "*", "s_r_all", "smr_upper", "=", "np", ".", "exp", "(", "log_smr_upper", ")", "*", "s_r_all", "res", "=", "list", "(", "zip", "(", "adjusted_r", ",", "smr_lower", ",", "smr_upper", ")", ")", "return", "res" ]
A utility function to compute rate through indirect age standardization

    Parameters
    ----------
    e          : array
                 (n*h, 1), event variable measured for each age group across n spatial units
    b          : array
                 (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e        : array
                 (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b        : array
                 (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n          : integer
                 the number of spatial units
    alpha      : float
                 significance level for confidence interval

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    list
                 a list of n tuples; a tuple has a rate and its lower and upper limits
                 age standardized rate

    Examples
    --------

    Creating an array of an event variable (e.g., the number of cancer patients)
    for 2 regions in each of which 4 age groups are available.
    The first 4 values are event values for 4 age groups in the region 1,
    and the next 4 values are for 4 age groups in the region 2.

    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])

    Creating another array of a population-at-risk variable (e.g., total population)
    for the same two regions.
    The order for entering values is the same as the case of e.

    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])

    For indirect age standardization, we also need the data for standard population and event.
    Standard population is a reference population-at-risk (e.g., population distribution for the U.S.)
    whose age distribution can be used as a benchmarking point for comparing
    age distributions across regions (e.g., population distribution for Arizona and California).
    When the same concept is applied to the event variable,
    we call it standard event (e.g., the number of cancer patients in the U.S.).
    Two additional arrays including standard population and event are created.

    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])

    Specifying the number of regions.

    >>> n = 2

    Applying indirect_age_standardization function to e and b

    >>> [i[0] for i in indirect_age_standardization(e, b, s_e, s_b, n)]
    [0.23723821989528798, 0.2610803324099723]
[ "A", "utility", "function", "to", "compute", "rate", "through", "indirect", "age", "standardization" ]
2fafc6ec505e153152a86601d3e0fba080610c20
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L301-L379
train
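The confidence interval in the function above comes from a normal approximation on the log scale of the SMR, with the standard deviation of log(SMR) taken as 1/sqrt(observed events). A minimal, self-contained sketch of that construction, assuming only numpy and scipy (the helper name smr_ci is illustrative, not part of esda):

import numpy as np
from scipy.stats import norm

def smr_ci(smr, events, alpha=0.05):
    # Normal approximation on the log scale: sd(log(SMR)) = 1 / sqrt(events),
    # mirroring the log_smr_sd term in indirect_age_standardization above.
    z = norm.ppf(1 - 0.5 * alpha)
    sd = 1.0 / np.sqrt(events)
    return np.exp(np.log(smr) - z * sd), np.exp(np.log(smr) + z * sd)

# smr_ci(1.1, 95) gives roughly (0.90, 1.35); multiplying both limits by the
# overall standard rate s_r_all turns the SMR interval into a rate interval,
# which is what the function returns alongside each adjusted rate.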
pysal/esda
esda/tabular.py
_univariate_handler
def _univariate_handler(df, cols, stat=None, w=None, inplace=True,
                        pvalue='sim', outvals=None, swapname='', **kwargs):
    """
    Compute a univariate descriptive statistic `stat` over columns `cols` in
    `df`.

    Parameters
    ----------
    df : pandas.DataFrame
        the dataframe containing columns to compute the descriptive
        statistics
    cols : string or list of strings
        one or more names of columns in `df` to use to compute
        exploratory descriptive statistics.
    stat : callable
        a function that takes data as a first argument and any number
        of configuration keyword arguments and returns an object
        encapsulating the exploratory statistic results
    w : pysal.weights.W
        the spatial weights object corresponding to the dataframe
    inplace : bool
        a flag denoting whether to add the statistic to the dataframe
        in memory, or to construct a copy of the dataframe and append
        the results to the copy
    pvalue : string
        the name of the pvalue on the results object wanted
    outvals : list of strings
        names of attributes of the statistic object to attempt to flatten
        into columns
    swapname : string
        suffix to replace the generic identifier with. Each caller of this
        function should set this to a unique column suffix
    **kwargs : optional keyword arguments
        options that are passed directly to the statistic
    """
    ### Preprocess
    if not inplace:
        new_df = df.copy()
        _univariate_handler(new_df, cols, stat=stat, w=w, pvalue=pvalue,
                            inplace=True, outvals=outvals, swapname=swapname,
                            **kwargs)
        return new_df
    if w is None:
        for name in df._metadata:
            this_obj = df.__dict__.get(name)
            if isinstance(this_obj, W):
                w = this_obj
    if w is None:
        raise Exception('Weights not provided and no weights attached to frame!'
                        ' Please provide a weight or attach a weight to the'
                        ' dataframe')
    ### Prep indexes
    if outvals is None:
        outvals = []
    outvals.insert(0, '_statistic')
    if pvalue.lower() in ['all', 'both', '*']:
        raise NotImplementedError("If you want more than one type of PValue, add"
                                  " the targeted pvalue type to outvals. For example:"
                                  " Geary(df, cols=['HOVAL'], w=w, outvals=['p_z_sim', "
                                  "'p_rand'])")
    # this is nontrivial, since we
    # can't know which p_value types are on the object without computing it.
    # This is because we don't flag them with @properties, so they're just
    # arbitrarily assigned post-facto. One solution might be to post-process the
    # objects, determine which pvalue types are available, and then grab them
    # all if needed.
    if pvalue != '':
        outvals.append('p_' + pvalue.lower())
    if isinstance(cols, str):
        cols = [cols]

    ### Make closure around weights & apply columnwise
    def column_stat(column):
        return stat(column.values, w=w, **kwargs)
    stat_objs = df[cols].apply(column_stat)

    ### Assign into dataframe
    for col in cols:
        stat_obj = stat_objs[col]
        y = kwargs.get('y')
        if y is not None:
            col += '-' + y.name
        outcols = ['_'.join((col, val)) for val in outvals]
        for colname, attname in zip(outcols, outvals):
            df[colname] = stat_obj.__getattribute__(attname)
    if swapname != '':
        df.columns = [_swap_ending(col, swapname)
                      if col.endswith('_statistic') else col
                      for col in df.columns]
python
def _univariate_handler(df, cols, stat=None, w=None, inplace=True,
                        pvalue='sim', outvals=None, swapname='', **kwargs):
    """
    Compute a univariate descriptive statistic `stat` over columns `cols` in
    `df`.

    Parameters
    ----------
    df : pandas.DataFrame
        the dataframe containing columns to compute the descriptive
        statistics
    cols : string or list of strings
        one or more names of columns in `df` to use to compute
        exploratory descriptive statistics.
    stat : callable
        a function that takes data as a first argument and any number
        of configuration keyword arguments and returns an object
        encapsulating the exploratory statistic results
    w : pysal.weights.W
        the spatial weights object corresponding to the dataframe
    inplace : bool
        a flag denoting whether to add the statistic to the dataframe
        in memory, or to construct a copy of the dataframe and append
        the results to the copy
    pvalue : string
        the name of the pvalue on the results object wanted
    outvals : list of strings
        names of attributes of the statistic object to attempt to flatten
        into columns
    swapname : string
        suffix to replace the generic identifier with. Each caller of this
        function should set this to a unique column suffix
    **kwargs : optional keyword arguments
        options that are passed directly to the statistic
    """
    ### Preprocess
    if not inplace:
        new_df = df.copy()
        _univariate_handler(new_df, cols, stat=stat, w=w, pvalue=pvalue,
                            inplace=True, outvals=outvals, swapname=swapname,
                            **kwargs)
        return new_df
    if w is None:
        for name in df._metadata:
            this_obj = df.__dict__.get(name)
            if isinstance(this_obj, W):
                w = this_obj
    if w is None:
        raise Exception('Weights not provided and no weights attached to frame!'
                        ' Please provide a weight or attach a weight to the'
                        ' dataframe')
    ### Prep indexes
    if outvals is None:
        outvals = []
    outvals.insert(0, '_statistic')
    if pvalue.lower() in ['all', 'both', '*']:
        raise NotImplementedError("If you want more than one type of PValue, add"
                                  " the targeted pvalue type to outvals. For example:"
                                  " Geary(df, cols=['HOVAL'], w=w, outvals=['p_z_sim', "
                                  "'p_rand'])")
    # this is nontrivial, since we
    # can't know which p_value types are on the object without computing it.
    # This is because we don't flag them with @properties, so they're just
    # arbitrarily assigned post-facto. One solution might be to post-process the
    # objects, determine which pvalue types are available, and then grab them
    # all if needed.
    if pvalue != '':
        outvals.append('p_' + pvalue.lower())
    if isinstance(cols, str):
        cols = [cols]

    ### Make closure around weights & apply columnwise
    def column_stat(column):
        return stat(column.values, w=w, **kwargs)
    stat_objs = df[cols].apply(column_stat)

    ### Assign into dataframe
    for col in cols:
        stat_obj = stat_objs[col]
        y = kwargs.get('y')
        if y is not None:
            col += '-' + y.name
        outcols = ['_'.join((col, val)) for val in outvals]
        for colname, attname in zip(outcols, outvals):
            df[colname] = stat_obj.__getattribute__(attname)
    if swapname != '':
        df.columns = [_swap_ending(col, swapname)
                      if col.endswith('_statistic') else col
                      for col in df.columns]
[ "def", "_univariate_handler", "(", "df", ",", "cols", ",", "stat", "=", "None", ",", "w", "=", "None", ",", "inplace", "=", "True", ",", "pvalue", "=", "'sim'", ",", "outvals", "=", "None", ",", "swapname", "=", "''", ",", "*", "*", "kwargs", ")", ":", "### Preprocess", "if", "not", "inplace", ":", "new_df", "=", "df", ".", "copy", "(", ")", "_univariate_handler", "(", "new_df", ",", "cols", ",", "stat", "=", "stat", ",", "w", "=", "w", ",", "pvalue", "=", "pvalue", ",", "inplace", "=", "True", ",", "outvals", "=", "outvals", ",", "swapname", "=", "swapname", ",", "*", "*", "kwargs", ")", "return", "new_df", "if", "w", "is", "None", ":", "for", "name", "in", "df", ".", "_metadata", ":", "this_obj", "=", "df", ".", "__dict__", ".", "get", "(", "name", ")", "if", "isinstance", "(", "this_obj", ",", "W", ")", ":", "w", "=", "this_obj", "if", "w", "is", "None", ":", "raise", "Exception", "(", "'Weights not provided and no weights attached to frame!'", "' Please provide a weight or attach a weight to the'", "' dataframe'", ")", "### Prep indexes", "if", "outvals", "is", "None", ":", "outvals", "=", "[", "]", "outvals", ".", "insert", "(", "0", ",", "'_statistic'", ")", "if", "pvalue", ".", "lower", "(", ")", "in", "[", "'all'", ",", "'both'", ",", "'*'", "]", ":", "raise", "NotImplementedError", "(", "\"If you want more than one type of PValue,add\"", "\" the targeted pvalue type to outvals. For example:\"", "\" Geary(df, cols=['HOVAL'], w=w, outvals=['p_z_sim', \"", "\"'p_rand']\"", ")", "# this is nontrivial, since we", "# can't know which p_value types are on the object without computing it.", "# This is because we don't flag them with @properties, so they're just", "# arbitrarily assigned post-facto. One solution might be to post-process the", "# objects, determine which pvalue types are available, and then grab them", "# all if needed.", "if", "pvalue", "is", "not", "''", ":", "outvals", ".", "append", "(", "'p_'", "+", "pvalue", ".", "lower", "(", ")", ")", "if", "isinstance", "(", "cols", ",", "str", ")", ":", "cols", "=", "[", "cols", "]", "### Make closure around weights & apply columnwise", "def", "column_stat", "(", "column", ")", ":", "return", "stat", "(", "column", ".", "values", ",", "w", "=", "w", ",", "*", "*", "kwargs", ")", "stat_objs", "=", "df", "[", "cols", "]", ".", "apply", "(", "column_stat", ")", "### Assign into dataframe", "for", "col", "in", "cols", ":", "stat_obj", "=", "stat_objs", "[", "col", "]", "y", "=", "kwargs", ".", "get", "(", "'y'", ")", "if", "y", "is", "not", "None", ":", "col", "+=", "'-'", "+", "y", ".", "name", "outcols", "=", "[", "'_'", ".", "join", "(", "(", "col", ",", "val", ")", ")", "for", "val", "in", "outvals", "]", "for", "colname", ",", "attname", "in", "zip", "(", "outcols", ",", "outvals", ")", ":", "df", "[", "colname", "]", "=", "stat_obj", ".", "__getattribute__", "(", "attname", ")", "if", "swapname", "is", "not", "''", ":", "df", ".", "columns", "=", "[", "_swap_ending", "(", "col", ",", "swapname", ")", "if", "col", ".", "endswith", "(", "'_statistic'", ")", "else", "col", "for", "col", "in", "df", ".", "columns", "]" ]
Compute a univariate descriptive statistic `stat` over columns `cols` in
`df`.

Parameters
----------
df : pandas.DataFrame
    the dataframe containing columns to compute the descriptive
    statistics
cols : string or list of strings
    one or more names of columns in `df` to use to compute
    exploratory descriptive statistics.
stat : callable
    a function that takes data as a first argument and any number
    of configuration keyword arguments and returns an object
    encapsulating the exploratory statistic results
w : pysal.weights.W
    the spatial weights object corresponding to the dataframe
inplace : bool
    a flag denoting whether to add the statistic to the dataframe
    in memory, or to construct a copy of the dataframe and append
    the results to the copy
pvalue : string
    the name of the pvalue on the results object wanted
outvals : list of strings
    names of attributes of the statistic object to attempt to flatten
    into columns
swapname : string
    suffix to replace the generic identifier with. Each caller of this
    function should set this to a unique column suffix
**kwargs : optional keyword arguments
    options that are passed directly to the statistic
[ "Compute", "a", "univariate", "descriptive", "statistic", "stat", "over", "columns", "cols", "in", "df", "." ]
2fafc6ec505e153152a86601d3e0fba080610c20
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/tabular.py#L10-L98
train
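_univariate_handler is internal; in esda it typically backs the .by_col classmethods on statistics such as Moran, which forward their keyword arguments to this handler. A hedged usage sketch — the dataset path, weights constructor, and output column names are our reading of the code above, worth verifying against your installed versions:

import libpysal
import geopandas as gpd
from esda.moran import Moran

# Load a sample dataset and build queen-contiguity weights (assumes the
# libpysal example data are available locally).
df = gpd.read_file(libpysal.examples.get_path("columbus.shp"))
w = libpysal.weights.Queen.from_dataframe(df)

# Moran.by_col delegates to _univariate_handler: for each listed column it
# appends '<col>_moran' (the statistic, via swapname) and '<col>_p_sim'
# (the simulated p-value, via the default pvalue='sim').
out = Moran.by_col(df, ["HOVAL"], w=w, inplace=False)
print(out[["HOVAL_moran", "HOVAL_p_sim"]].head())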
pysal/esda
esda/tabular.py
_bivariate_handler
def _bivariate_handler(df, x, y=None, w=None, inplace=True, pvalue='sim',
                       outvals=None, **kwargs):
    """
    Compute a descriptive bivariate statistic over two sets of columns, `x`
    and `y`, contained in `df`.

    Parameters
    ----------
    df : pandas.DataFrame
        dataframe in which columns `x` and `y` are contained
    x : string or list of strings
        one or more column names to use as variates in the bivariate
        statistics
    y : string or list of strings
        one or more column names to use as variates in the bivariate
        statistics
    w : pysal.weights.W
        spatial weights object corresponding to the dataframe `df`
    inplace : bool
        a flag denoting whether to add the statistic to the dataframe
        in memory, or to construct a copy of the dataframe and append
        the results to the copy
    pvalue : string
        the name of the pvalue on the results object wanted
    outvals : list of strings
        names of attributes of the statistic object to attempt to flatten
        into columns
    swapname : string
        suffix to replace the generic identifier with (passed via
        `kwargs`). Each caller of this function should set this to a
        unique column suffix
    **kwargs : optional keyword arguments
        options that are passed directly to the statistic
    """
    real_swapname = kwargs.pop('swapname', '')
    if isinstance(y, str):
        y = [y]
    if isinstance(x, str):
        x = [x]
    if not inplace:
        new_df = df.copy()
        _bivariate_handler(new_df, x, y=y, w=w, inplace=True,
                           swapname=real_swapname,
                           pvalue=pvalue, outvals=outvals, **kwargs)
        return new_df
    if y is None:
        y = x
    for xi, yi in _it.product(x, y):
        if xi == yi:
            continue
        _univariate_handler(df, cols=xi, w=w, y=df[yi], inplace=True,
                            pvalue=pvalue, outvals=outvals, swapname='',
                            **kwargs)
    if real_swapname != '':
        df.columns = [_swap_ending(col, real_swapname)
                      if col.endswith('_statistic')
                      else col for col in df.columns]
python
def _bivariate_handler(df, x, y=None, w=None, inplace=True, pvalue='sim',
                       outvals=None, **kwargs):
    """
    Compute a descriptive bivariate statistic over two sets of columns, `x`
    and `y`, contained in `df`.

    Parameters
    ----------
    df : pandas.DataFrame
        dataframe in which columns `x` and `y` are contained
    x : string or list of strings
        one or more column names to use as variates in the bivariate
        statistics
    y : string or list of strings
        one or more column names to use as variates in the bivariate
        statistics
    w : pysal.weights.W
        spatial weights object corresponding to the dataframe `df`
    inplace : bool
        a flag denoting whether to add the statistic to the dataframe
        in memory, or to construct a copy of the dataframe and append
        the results to the copy
    pvalue : string
        the name of the pvalue on the results object wanted
    outvals : list of strings
        names of attributes of the statistic object to attempt to flatten
        into columns
    swapname : string
        suffix to replace the generic identifier with (passed via
        `kwargs`). Each caller of this function should set this to a
        unique column suffix
    **kwargs : optional keyword arguments
        options that are passed directly to the statistic
    """
    real_swapname = kwargs.pop('swapname', '')
    if isinstance(y, str):
        y = [y]
    if isinstance(x, str):
        x = [x]
    if not inplace:
        new_df = df.copy()
        _bivariate_handler(new_df, x, y=y, w=w, inplace=True,
                           swapname=real_swapname,
                           pvalue=pvalue, outvals=outvals, **kwargs)
        return new_df
    if y is None:
        y = x
    for xi, yi in _it.product(x, y):
        if xi == yi:
            continue
        _univariate_handler(df, cols=xi, w=w, y=df[yi], inplace=True,
                            pvalue=pvalue, outvals=outvals, swapname='',
                            **kwargs)
    if real_swapname != '':
        df.columns = [_swap_ending(col, real_swapname)
                      if col.endswith('_statistic')
                      else col for col in df.columns]
[ "def", "_bivariate_handler", "(", "df", ",", "x", ",", "y", "=", "None", ",", "w", "=", "None", ",", "inplace", "=", "True", ",", "pvalue", "=", "'sim'", ",", "outvals", "=", "None", ",", "*", "*", "kwargs", ")", ":", "real_swapname", "=", "kwargs", ".", "pop", "(", "'swapname'", ",", "''", ")", "if", "isinstance", "(", "y", ",", "str", ")", ":", "y", "=", "[", "y", "]", "if", "isinstance", "(", "x", ",", "str", ")", ":", "x", "=", "[", "x", "]", "if", "not", "inplace", ":", "new_df", "=", "df", ".", "copy", "(", ")", "_bivariate_handler", "(", "new_df", ",", "x", ",", "y", "=", "y", ",", "w", "=", "w", ",", "inplace", "=", "True", ",", "swapname", "=", "real_swapname", ",", "pvalue", "=", "pvalue", ",", "outvals", "=", "outvals", ",", "*", "*", "kwargs", ")", "return", "new_df", "if", "y", "is", "None", ":", "y", "=", "x", "for", "xi", ",", "yi", "in", "_it", ".", "product", "(", "x", ",", "y", ")", ":", "if", "xi", "==", "yi", ":", "continue", "_univariate_handler", "(", "df", ",", "cols", "=", "xi", ",", "w", "=", "w", ",", "y", "=", "df", "[", "yi", "]", ",", "inplace", "=", "True", ",", "pvalue", "=", "pvalue", ",", "outvals", "=", "outvals", ",", "swapname", "=", "''", ",", "*", "*", "kwargs", ")", "if", "real_swapname", "is", "not", "''", ":", "df", ".", "columns", "=", "[", "_swap_ending", "(", "col", ",", "real_swapname", ")", "if", "col", ".", "endswith", "(", "'_statistic'", ")", "else", "col", "for", "col", "in", "df", ".", "columns", "]" ]
Compute a descriptive bivariate statistic over two sets of columns, `x`
and `y`, contained in `df`.

Parameters
----------
df : pandas.DataFrame
    dataframe in which columns `x` and `y` are contained
x : string or list of strings
    one or more column names to use as variates in the bivariate
    statistics
y : string or list of strings
    one or more column names to use as variates in the bivariate
    statistics
w : pysal.weights.W
    spatial weights object corresponding to the dataframe `df`
inplace : bool
    a flag denoting whether to add the statistic to the dataframe
    in memory, or to construct a copy of the dataframe and append
    the results to the copy
pvalue : string
    the name of the pvalue on the results object wanted
outvals : list of strings
    names of attributes of the statistic object to attempt to flatten
    into columns
swapname : string
    suffix to replace the generic identifier with (passed via
    `kwargs`). Each caller of this function should set this to a
    unique column suffix
**kwargs : optional keyword arguments
    options that are passed directly to the statistic
[ "Compute", "a", "descriptive", "bivariate", "statistic", "over", "two", "sets", "of", "columns", "x", "and", "y", "contained", "in", "df", "." ]
2fafc6ec505e153152a86601d3e0fba080610c20
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/tabular.py#L100-L154
train
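The bivariate handler drives .by_col on two-variable statistics such as Moran_BV: for every (x, y) pair with x != y it delegates to _univariate_handler, then swaps the generic '_statistic' ending for the statistic's name. A sketch under the same assumptions as the previous example; the output column naming ('<x>-<y>_moran_bv') is our reading of the handler's scheme and worth verifying:

import libpysal
import geopandas as gpd
from esda.moran import Moran_BV

df = gpd.read_file(libpysal.examples.get_path("columbus.shp"))
w = libpysal.weights.Queen.from_dataframe(df)

# For each (x, y) pair the handler tags the x column with '-<y>' before
# appending outputs, so the statistic lands in 'HOVAL-CRIME_moran_bv' and
# the p-value in 'HOVAL-CRIME_p_sim'.
out = Moran_BV.by_col(df, ["HOVAL"], ["CRIME"], w=w, inplace=False)
print([c for c in out.columns if c.endswith("_moran_bv")])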
pysal/esda
esda/tabular.py
_swap_ending
def _swap_ending(s, ending, delim='_'):
    """
    Replace the ending of a string, delimited into an arbitrary number of
    chunks by `delim`, with the ending provided

    Parameters
    ----------
    s : string
        the string whose ending will be replaced
    ending : string
        string used to replace the ending of `s`
    delim : string
        string that splits `s` into one or more parts

    Returns
    -------
    string
        new string where the final chunk of `s`, delimited by `delim`, is
        replaced with `ending`.
    """
    parts = [x for x in s.split(delim)[:-1] if x != '']
    parts.append(ending)
    return delim.join(parts)
python
def _swap_ending(s, ending, delim='_'):
    """
    Replace the ending of a string, delimited into an arbitrary number of
    chunks by `delim`, with the ending provided

    Parameters
    ----------
    s : string
        the string whose ending will be replaced
    ending : string
        string used to replace the ending of `s`
    delim : string
        string that splits `s` into one or more parts

    Returns
    -------
    string
        new string where the final chunk of `s`, delimited by `delim`, is
        replaced with `ending`.
    """
    parts = [x for x in s.split(delim)[:-1] if x != '']
    parts.append(ending)
    return delim.join(parts)
[ "def", "_swap_ending", "(", "s", ",", "ending", ",", "delim", "=", "'_'", ")", ":", "parts", "=", "[", "x", "for", "x", "in", "s", ".", "split", "(", "delim", ")", "[", ":", "-", "1", "]", "if", "x", "!=", "''", "]", "parts", ".", "append", "(", "ending", ")", "return", "delim", ".", "join", "(", "parts", ")" ]
Replace the ending of a string, delimited into an arbitrary number of
chunks by `delim`, with the ending provided

Parameters
----------
s : string
    the string whose ending will be replaced
ending : string
    string used to replace the ending of `s`
delim : string
    string that splits `s` into one or more parts

Returns
-------
string
    new string where the final chunk of `s`, delimited by `delim`, is
    replaced with `ending`.
[ "Replace", "the", "ending", "of", "a", "string", "delimited", "into", "an", "arbitrary", "number", "of", "chunks", "by", "delim", "with", "the", "ending", "provided" ]
2fafc6ec505e153152a86601d3e0fba080610c20
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/tabular.py#L156-L177
train
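Because _swap_ending drops empty chunks before re-joining, the doubled delimiter produced by joining a column name with the '_statistic' tag does not leave a stray underscore behind. A quick illustration in plain Python (assumes _swap_ending is in scope, e.g. imported from esda.tabular):

# The handlers above build raw output names like '<col>__statistic', then
# swap the generic ending for the statistic's name:
assert _swap_ending("HOVAL_statistic", "moran") == "HOVAL_moran"

# Empty chunks from doubled delimiters are discarded before joining:
assert _swap_ending("HOVAL__statistic", "moran") == "HOVAL_moran"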
symengine/symengine.py
symengine/compatibility.py
is_sequence
def is_sequence(i, include=None): """ Return a boolean indicating whether ``i`` is a sequence in the SymPy sense. If anything that fails the test below should be included as being a sequence for your application, set 'include' to that object's type; multiple types should be passed as a tuple of types. Note: although generators can generate a sequence, they often need special handling to make sure their elements are captured before the generator is exhausted, so these are not included by default in the definition of a sequence. See also: iterable Examples ======== >>> from sympy.utilities.iterables import is_sequence >>> from types import GeneratorType >>> is_sequence([]) True >>> is_sequence(set()) False >>> is_sequence('abc') False >>> is_sequence('abc', include=str) True >>> generator = (c for c in 'abc') >>> is_sequence(generator) False >>> is_sequence(generator, include=(str, GeneratorType)) True """ return (hasattr(i, '__getitem__') and iterable(i) or bool(include) and isinstance(i, include))
python
def is_sequence(i, include=None): """ Return a boolean indicating whether ``i`` is a sequence in the SymPy sense. If anything that fails the test below should be included as being a sequence for your application, set 'include' to that object's type; multiple types should be passed as a tuple of types. Note: although generators can generate a sequence, they often need special handling to make sure their elements are captured before the generator is exhausted, so these are not included by default in the definition of a sequence. See also: iterable Examples ======== >>> from sympy.utilities.iterables import is_sequence >>> from types import GeneratorType >>> is_sequence([]) True >>> is_sequence(set()) False >>> is_sequence('abc') False >>> is_sequence('abc', include=str) True >>> generator = (c for c in 'abc') >>> is_sequence(generator) False >>> is_sequence(generator, include=(str, GeneratorType)) True """ return (hasattr(i, '__getitem__') and iterable(i) or bool(include) and isinstance(i, include))
[ "def", "is_sequence", "(", "i", ",", "include", "=", "None", ")", ":", "return", "(", "hasattr", "(", "i", ",", "'__getitem__'", ")", "and", "iterable", "(", "i", ")", "or", "bool", "(", "include", ")", "and", "isinstance", "(", "i", ",", "include", ")", ")" ]
Return a boolean indicating whether ``i`` is a sequence in the SymPy sense. If anything that fails the test below should be included as being a sequence for your application, set 'include' to that object's type; multiple types should be passed as a tuple of types. Note: although generators can generate a sequence, they often need special handling to make sure their elements are captured before the generator is exhausted, so these are not included by default in the definition of a sequence. See also: iterable Examples ======== >>> from sympy.utilities.iterables import is_sequence >>> from types import GeneratorType >>> is_sequence([]) True >>> is_sequence(set()) False >>> is_sequence('abc') False >>> is_sequence('abc', include=str) True >>> generator = (c for c in 'abc') >>> is_sequence(generator) False >>> is_sequence(generator, include=(str, GeneratorType)) True
[ "Return", "a", "boolean", "indicating", "whether", "i", "is", "a", "sequence", "in", "the", "SymPy", "sense", ".", "If", "anything", "that", "fails", "the", "test", "below", "should", "be", "included", "as", "being", "a", "sequence", "for", "your", "application", "set", "include", "to", "that", "object", "s", "type", ";", "multiple", "types", "should", "be", "passed", "as", "a", "tuple", "of", "types", "." ]
1366cf98ceaade339c5dd24ae3381a0e63ea9dad
https://github.com/symengine/symengine.py/blob/1366cf98ceaade339c5dd24ae3381a0e63ea9dad/symengine/compatibility.py#L245-L282
train