Columns:
- identifier: string (length 1 to 155)
- parameters: string (length 2 to 6.09k)
- docstring: string (length 11 to 63.4k)
- docstring_summary: string (length 0 to 63.4k)
- function: string (length 29 to 99.8k)
- function_tokens: sequence
- start_point: sequence
- end_point: sequence
- language: string (1 distinct value)
- docstring_language: string (length 2 to 7)
- docstring_language_predictions: string (length 18 to 23)
- is_langid_reliable: string (2 distinct values)
GeneratePassword
(length=8, include_symbols=False)
Generates a random password.
Generates a random password.
def GeneratePassword(length=8, include_symbols=False):
    """Generates a random password."""
    if length < MIN_LENGTH:
        raise InputError('Password length must be at least %d' % MIN_LENGTH)

    candidates = (CANDIDATES_WITH_SYMBOLS if include_symbols
                  else CANDIDATES_WITHOUT_SYMBOLS)
    categories = (CATEGORIES_WITH_SYMBOLS if include_symbols
                  else CATEGORIES_WITHOUT_SYMBOLS)

    # Generates up to the specified length minus the number of categories.
    # Then inserts one character for each category, ensuring that the character
    # satisfy the category if the generated string hasn't already.
    generated = ([random.choice(ALPHABET)] +
                 [random.choice(candidates)
                  for _ in range(length - 1 - len(categories))])
    for category in categories:
        _InsertAndEnsureSatisfaction(generated, category, candidates)
    return ''.join(generated)
[ "def", "GeneratePassword", "(", "length", "=", "8", ",", "include_symbols", "=", "False", ")", ":", "if", "length", "<", "MIN_LENGTH", ":", "raise", "InputError", "(", "'Password length must be at least %d'", "%", "MIN_LENGTH", ")", "candidates", "=", "(", "CANDIDATES_WITH_SYMBOLS", "if", "include_symbols", "else", "CANDIDATES_WITHOUT_SYMBOLS", ")", "categories", "=", "(", "CATEGORIES_WITH_SYMBOLS", "if", "include_symbols", "else", "CATEGORIES_WITHOUT_SYMBOLS", ")", "# Generates up to the specified length minus the number of categories.", "# Then inserts one character for each category, ensuring that the character", "# satisfy the category if the generated string hasn't already.", "generated", "=", "(", "[", "random", ".", "choice", "(", "ALPHABET", ")", "]", "+", "[", "random", ".", "choice", "(", "candidates", ")", "for", "_", "in", "range", "(", "length", "-", "1", "-", "len", "(", "categories", ")", ")", "]", ")", "for", "category", "in", "categories", ":", "_InsertAndEnsureSatisfaction", "(", "generated", ",", "category", ",", "candidates", ")", "return", "''", ".", "join", "(", "generated", ")" ]
[ 87, 0 ]
[ 105, 27 ]
python
en
['en', 'ca', 'en']
True
_InsertAndEnsureSatisfaction
(generated, required, all_candidates)
Inserts 1 char into generated, satisfying required if not already. If the required characters are not already in the generated string, one will be inserted. If any required character is already in the generated string, a random character from all_candidates will be inserted. The insertion happens at a random location but not at the beginning. Args: generated: the string to be modified. required: list of required characters to check for. all_candidates: list of characters to choose from if the required characters are already satisfied.
Inserts 1 char into generated, satisfying required if not already.
def _InsertAndEnsureSatisfaction(generated, required, all_candidates):
    """Inserts 1 char into generated, satisfying required if not already.

    If the required characters are not already in the generated string, one will
    be inserted. If any required character is already in the generated string,
    a random character from all_candidates will be inserted.

    The insertion happens at a random location but not at the beginning.

    Args:
      generated: the string to be modified.
      required: list of required characters to check for.
      all_candidates: list of characters to choose from if the required
        characters are already satisfied.
    """
    if set(generated).isdisjoint(required):
        # Not yet satisfied. Insert a required candidate.
        _InsertInto(generated, required)
    else:
        # Already satisfied. Insert any candidate.
        _InsertInto(generated, all_candidates)
[ "def", "_InsertAndEnsureSatisfaction", "(", "generated", ",", "required", ",", "all_candidates", ")", ":", "if", "set", "(", "generated", ")", ".", "isdisjoint", "(", "required", ")", ":", "# Not yet satisfied. Insert a required candidate.", "_InsertInto", "(", "generated", ",", "required", ")", "else", ":", "# Already satisfied. Insert any candidate.", "_InsertInto", "(", "generated", ",", "all_candidates", ")" ]
[ 108, 0 ]
[ 127, 42 ]
python
en
['en', 'en', 'en']
True
_InsertInto
(generated, candidates)
Inserts a random candidate into a random non-zero index of generated.
Inserts a random candidate into a random non-zero index of generated.
def _InsertInto(generated, candidates):
    """Inserts a random candidate into a random non-zero index of generated."""
    # Avoids inserting at index 0, since the first character follows its own rule.
    generated.insert(random.randint(1, len(generated) - 1),
                     random.choice(candidates))
[ "def", "_InsertInto", "(", "generated", ",", "candidates", ")", ":", "# Avoids inserting at index 0, since the first character follows its own rule.", "generated", ".", "insert", "(", "random", ".", "randint", "(", "1", ",", "len", "(", "generated", ")", "-", "1", ")", ",", "random", ".", "choice", "(", "candidates", ")", ")" ]
[ 130, 0 ]
[ 134, 45 ]
python
en
['en', 'en', 'en']
True
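The three functions above (GeneratePassword, _InsertAndEnsureSatisfaction, _InsertInto) together form a complete generator, but the module-level constants they reference are not included in these rows. The sketch below shows one plausible set of stand-in definitions; the names come from the code above, while the values are illustrative assumptions only.

import random
import string

# Illustrative stand-ins for the module-level names used by GeneratePassword;
# the real values are not part of the dataset rows above.
MIN_LENGTH = 8
ALPHABET = string.ascii_letters
CANDIDATES_WITHOUT_SYMBOLS = string.ascii_letters + string.digits
CANDIDATES_WITH_SYMBOLS = CANDIDATES_WITHOUT_SYMBOLS + "!@#$%^&*"
CATEGORIES_WITHOUT_SYMBOLS = [string.ascii_lowercase, string.ascii_uppercase, string.digits]
CATEGORIES_WITH_SYMBOLS = CATEGORIES_WITHOUT_SYMBOLS + ["!@#$%^&*"]

class InputError(Exception):
    """Raised when the requested password length is too short."""

# With the three functions above pasted into the same module, a call such as
# GeneratePassword(length=12, include_symbols=True) returns a 12-character
# password that starts with a letter and contains at least one character from
# every category.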
GUICalculator.create_button_layout
(self)
Creates the grid of calculator buttons.
Creates the grid of calculator buttons.
def create_button_layout(self):
    "Creates the grid of calculator buttons."
    labels = ["exit", "mrc", "m+", "m-",
              "clear", "(", ")", "!",
              "sqrt", "pow", "%", "/",
              "7", "8", "9", "*",
              "4", "5", "6", "-",
              "1", "2", "3", "+",
              "0", ".", "c", "="]
    buttons = {i: QtGui.QPushButton(i) for i in labels}
    for b in buttons.values():
        b.clicked.connect(self.button_clicked)
    # Create our positions grid (0,0), (0,1) etc.
    pos = [(i, j) for i in range(7) for j in range(4)]
    layout = QtGui.QGridLayout()
    for i in range(len(labels)):
        layout.addWidget(buttons[labels[i]], pos[i][0], pos[i][1])
    return layout
[ "def", "create_button_layout", "(", "self", ")", ":", "labels", "=", "[", "\"exit\"", ",", "\"mrc\"", ",", "\"m+\"", ",", "\"m-\"", ",", "\"clear\"", ",", "\"(\"", ",", "\")\"", ",", "\"!\"", ",", "\"sqrt\"", ",", "\"pow\"", ",", "\"%\"", ",", "\"/\"", ",", "\"7\"", ",", "\"8\"", ",", "\"9\"", ",", "\"*\"", ",", "\"4\"", ",", "\"5\"", ",", "\"6\"", ",", "\"-\"", ",", "\"1\"", ",", "\"2\"", ",", "\"3\"", ",", "\"+\"", ",", "\"0\"", ",", "\".\"", ",", "\"c\"", ",", "\"=\"", "]", "buttons", "=", "{", "i", ":", "QtGui", ".", "QPushButton", "(", "i", ")", "for", "i", "in", "labels", "}", "for", "b", "in", "buttons", ".", "values", "(", ")", ":", "b", ".", "clicked", ".", "connect", "(", "self", ".", "button_clicked", ")", "# Create our positions grid (0,0), (0,1) etc.", "pos", "=", "[", "(", "i", ",", "j", ")", "for", "i", "in", "range", "(", "7", ")", "for", "j", "in", "range", "(", "4", ")", "]", "layout", "=", "QtGui", ".", "QGridLayout", "(", ")", "for", "i", "in", "range", "(", "len", "(", "labels", ")", ")", ":", "layout", ".", "addWidget", "(", "buttons", "[", "labels", "[", "i", "]", "]", ",", "pos", "[", "i", "]", "[", "0", "]", ",", "pos", "[", "i", "]", "[", "1", "]", ")", "return", "layout" ]
[ 38, 4 ]
[ 62, 21 ]
python
en
['en', 'en', 'en']
True
SplitDatabase.__init__
(self, input_file, dir_folds=None, n_splits=10, sep_read='\t', sep_write='\t', header=None, names=None, as_binary=False, binary_col=None, write_mode='w')
Given a database, this class is responsible for creating training and test sets for k folds with well-known strategies: - k-fold cross-validation - ShuffleSplit Usage: >> SplitDatabase(input_file=database, dir_folds=dir_path, n_folds=10).kfoldcrossvalidation() >> SplitDatabase(input_file=database, dir_folds=dir_path, n_folds=10).shuffle_split(test_size=0.3) # To use only one fold, you should use only shuffle_split. kfoldcrossvalidation works only with # n_folds >= 2: >> SplitDatabase(input_file=database, dir_folds=dir_path, n_folds=1).shuffle_split(test_size=0.1) :param input_file: Input File with at least 2 columns. :type input_file: str :param dir_folds: Directory to write folds (train and test files) :type dir_folds: str :param n_splits: How many folds the strategy will create :type n_splits: int, default 10 :param sep_read: Delimiter for input files :type sep_read: str, default '\t' :param sep_write: Delimiter for output files :type sep_write: str, default '\t' :param header: Skip header line (only works with method: read_with_pandas) :type header: int, default None :param names: Name of columns (only works with method: read_with_pandas) :type names: str, default None :param as_binary: If True, the explicit feedback will be transformed to binary :type as_binary: bool, default False :param binary_col: Index of columns to read as binary (only works with method: read_with_pandas) :type binary_col: int, default 2 :param write_mode: Method to write file :type write_mode: str, default 'w'
Given a database, this class is responsible for creating training and test sets for k folds with well-known strategies:
def __init__(self, input_file, dir_folds=None, n_splits=10, sep_read='\t', sep_write='\t', header=None, names=None,
             as_binary=False, binary_col=None, write_mode='w'):
    """
    Given a database, this class is responsible for creating training and test sets
    for k folds with well-known strategies:

    - k-fold cross-validation
    - ShuffleSplit

    Usage:

        >> SplitDatabase(input_file=database, dir_folds=dir_path, n_folds=10).kfoldcrossvalidation()
        >> SplitDatabase(input_file=database, dir_folds=dir_path, n_folds=10).shuffle_split(test_size=0.3)

        # To use only one fold, you should use only shuffle_split. kfoldcrossvalidation works only with
        # n_folds >= 2:
        >> SplitDatabase(input_file=database, dir_folds=dir_path, n_folds=1).shuffle_split(test_size=0.1)

    :param input_file: Input File with at least 2 columns.
    :type input_file: str

    :param dir_folds: Directory to write folds (train and test files)
    :type dir_folds: str

    :param n_splits: How many folds the strategy will create
    :type n_splits: int, default 10

    :param sep_read: Delimiter for input files
    :type sep_read: str, default '\t'

    :param sep_write: Delimiter for output files
    :type sep_write: str, default '\t'

    :param header: Skip header line (only works with method: read_with_pandas)
    :type header: int, default None

    :param names: Name of columns (only works with method: read_with_pandas)
    :type names: str, default None

    :param as_binary: If True, the explicit feedback will be transformed to binary
    :type as_binary: bool, default False

    :param binary_col: Index of columns to read as binary (only works with method: read_with_pandas)
    :type binary_col: int, default 2

    :param write_mode: Method to write file
    :type write_mode: str, default 'w'
    """
    super(SplitDatabase, self).__init__(input_file, sep=sep_read, header=header, names=names,
                                        as_binary=as_binary, binary_col=binary_col)

    self.dir_folds = dir_folds
    self.n_splits = n_splits
    self.sep_write = sep_write
    self.write_mode = write_mode

    self.df = self.read_with_pandas()

    if self.dir_folds is not None:
        self.create_folds()
[ "def", "__init__", "(", "self", ",", "input_file", ",", "dir_folds", "=", "None", ",", "n_splits", "=", "10", ",", "sep_read", "=", "'\\t'", ",", "sep_write", "=", "'\\t'", ",", "header", "=", "None", ",", "names", "=", "None", ",", "as_binary", "=", "False", ",", "binary_col", "=", "None", ",", "write_mode", "=", "'w'", ")", ":", "super", "(", "SplitDatabase", ",", "self", ")", ".", "__init__", "(", "input_file", ",", "sep", "=", "sep_read", ",", "header", "=", "header", ",", "names", "=", "names", ",", "as_binary", "=", "as_binary", ",", "binary_col", "=", "binary_col", ")", "self", ".", "dir_folds", "=", "dir_folds", "self", ".", "n_splits", "=", "n_splits", "self", ".", "sep_write", "=", "sep_write", "self", ".", "write_mode", "=", "write_mode", "self", ".", "df", "=", "self", ".", "read_with_pandas", "(", ")", "if", "self", ".", "dir_folds", "is", "not", "None", ":", "self", ".", "create_folds", "(", ")" ]
[ 19, 4 ]
[ 78, 31 ]
python
en
['en', 'error', 'th']
False
SplitDatabase.kfoldcrossvalidation
(self, shuffle=True, random_state=None)
k-fold cross-validation In k-fold cross-validation, the original sample is randomly partitioned into k equal sized subsamples. Of the k subsamples, a single subsample is retained as the validation data for testing the model, and the remaining k − 1 subsamples are used as training data. The cross-validation process is then repeated k times (the folds), with each of the k subsamples used exactly once as the validation data. The k results from the folds can then be averaged (or otherwise combined) to produce a single estimation. Reference: https://en.wikipedia.org/wiki/Cross-validation_(statistics) :param shuffle: :type shuffle: :param random_state: :type random_state: :return:
k-fold cross-validation
def kfoldcrossvalidation(self, shuffle=True, random_state=None):
    """
    k-fold cross-validation

    In k-fold cross-validation, the original sample is randomly partitioned into k equal sized subsamples.
    Of the k subsamples, a single subsample is retained as the validation data for testing the model,
    and the remaining k − 1 subsamples are used as training data.

    The cross-validation process is then repeated k times (the folds), with each of the k subsamples
    used exactly once as the validation data. The k results from the folds can then be averaged
    (or otherwise combined) to produce a single estimation.

    Reference: https://en.wikipedia.org/wiki/Cross-validation_(statistics)

    :param shuffle:
    :type shuffle:

    :param random_state:
    :type random_state:

    :return:
    """
    kfold = KFold(n_splits=self.n_splits, shuffle=shuffle, random_state=random_state)

    trained_model = list(kfold.split(self.df))

    if self.dir_folds is not None:
        self.write_files(trained_model)

    return trained_model
[ "def", "kfoldcrossvalidation", "(", "self", ",", "shuffle", "=", "True", ",", "random_state", "=", "None", ")", ":", "kfold", "=", "KFold", "(", "n_splits", "=", "self", ".", "n_splits", ",", "shuffle", "=", "shuffle", ",", "random_state", "=", "random_state", ")", "trained_model", "=", "list", "(", "kfold", ".", "split", "(", "self", ".", "df", ")", ")", "if", "self", ".", "dir_folds", "is", "not", "None", ":", "self", ".", "write_files", "(", "trained_model", ")", "return", "trained_model" ]
[ 106, 4 ]
[ 134, 28 ]
python
en
['en', 'error', 'th']
False
SplitDatabase.shuffle_split
(self, test_size=0.1, random_state=None)
Shuffle Split Random permutation cross-validator Yields indices to split data into training and test sets. Note: contrary to other cross-validation strategies, random splits do not guarantee that all folds will be different, although this is still very likely for sizeable databases. :param test_size: :type test_size: :param random_state: :type random_state: :return:
Shuffle Split
def shuffle_split(self, test_size=0.1, random_state=None):
    """
    Shuffle Split

    Random permutation cross-validator

    Yields indices to split data into training and test sets.

    Note: contrary to other cross-validation strategies, random splits do not guarantee that
    all folds will be different, although this is still very likely for sizeable databases.

    :param test_size:
    :type test_size:

    :param random_state:
    :type random_state:

    :return:
    """
    ss = ShuffleSplit(n_splits=self.n_splits, test_size=test_size, random_state=random_state)

    trained_model = list(ss.split(self.df))

    if self.dir_folds is not None:
        self.write_files(trained_model)

    return trained_model
[ "def", "shuffle_split", "(", "self", ",", "test_size", "=", "0.1", ",", "random_state", "=", "None", ")", ":", "ss", "=", "ShuffleSplit", "(", "n_splits", "=", "self", ".", "n_splits", ",", "test_size", "=", "test_size", ",", "random_state", "=", "random_state", ")", "trained_model", "=", "list", "(", "ss", ".", "split", "(", "self", ".", "df", ")", ")", "if", "self", ".", "dir_folds", "is", "not", "None", ":", "self", ".", "write_files", "(", "trained_model", ")", "return", "trained_model" ]
[ 136, 4 ]
[ 161, 28 ]
python
en
['en', 'error', 'th']
False
compute_hex_hash
(s, algorithm=SIGNATURE_SHA1)
Computes string hash using specified algorithm and returns HEX string representation of hash. :param s: String to compute hash for :param algorithm: The name of algorithm to use for computing hash :return: HEX string of computed hash value
Computes string hash using specified algorithm and returns HEX string representation of hash.
def compute_hex_hash(s, algorithm=SIGNATURE_SHA1):
    """
    Computes string hash using specified algorithm and returns HEX string representation of hash.

    :param s: String to compute hash for
    :param algorithm: The name of algorithm to use for computing hash

    :return: HEX string of computed hash value
    """
    try:
        hash_fn = signature_algorithms[algorithm]
    except KeyError:
        raise ValueError('Unsupported hash algorithm: {}'.format(algorithm))
    return hash_fn(to_bytes(s)).hexdigest()
[ "def", "compute_hex_hash", "(", "s", ",", "algorithm", "=", "SIGNATURE_SHA1", ")", ":", "try", ":", "hash_fn", "=", "signature_algorithms", "[", "algorithm", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'Unsupported hash algorithm: {}'", ".", "format", "(", "algorithm", ")", ")", "return", "hash_fn", "(", "to_bytes", "(", "s", ")", ")", ".", "hexdigest", "(", ")" ]
[ 140, 0 ]
[ 153, 43 ]
python
en
['en', 'error', 'th']
False
build_list_of_dicts
(val)
Converts a value that can be presented as a list of dict. In case top level item is not a list, it is wrapped with a list Valid values examples: - Valid dict: {"k": "v", "k2","v2"} - List of dict: [{"k": "v"}, {"k2","v2"}] - JSON decodable string: '{"k": "v"}', or '[{"k": "v"}]' - List of JSON decodable strings: ['{"k": "v"}', '{"k2","v2"}'] Invalid values examples: - ["not", "a", "dict"] - [123, None], - [["another", "list"]] :param val: Input value :type val: Union[list, dict, str] :return: Converted(or original) list of dict :raises: ValueError in case value cannot be converted to a list of dict
Converts a value that can be presented as a list of dict.
def build_list_of_dicts(val):
    """
    Converts a value that can be presented as a list of dict.

    In case top level item is not a list, it is wrapped with a list

    Valid values examples:
      - Valid dict: {"k": "v", "k2","v2"}
      - List of dict: [{"k": "v"}, {"k2","v2"}]
      - JSON decodable string: '{"k": "v"}', or '[{"k": "v"}]'
      - List of JSON decodable strings: ['{"k": "v"}', '{"k2","v2"}']

    Invalid values examples:
      - ["not", "a", "dict"]
      - [123, None],
      - [["another", "list"]]

    :param val: Input value
    :type val: Union[list, dict, str]

    :return: Converted(or original) list of dict
    :raises: ValueError in case value cannot be converted to a list of dict
    """
    if val is None:
        return []

    if isinstance(val, str):
        # use OrderedDict to preserve order
        val = json.loads(val, object_pairs_hook=OrderedDict)

    if isinstance(val, dict):
        val = [val]

    for index, item in enumerate(val):
        if isinstance(item, str):
            # use OrderedDict to preserve order
            val[index] = json.loads(item, object_pairs_hook=OrderedDict)
        if not isinstance(val[index], dict):
            raise ValueError("Expected a list of dicts")

    return val
[ "def", "build_list_of_dicts", "(", "val", ")", ":", "if", "val", "is", "None", ":", "return", "[", "]", "if", "isinstance", "(", "val", ",", "str", ")", ":", "# use OrderedDict to preserve order", "val", "=", "json", ".", "loads", "(", "val", ",", "object_pairs_hook", "=", "OrderedDict", ")", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "val", "=", "[", "val", "]", "for", "index", ",", "item", "in", "enumerate", "(", "val", ")", ":", "if", "isinstance", "(", "item", ",", "str", ")", ":", "# use OrderedDict to preserve order", "val", "[", "index", "]", "=", "json", ".", "loads", "(", "item", ",", "object_pairs_hook", "=", "OrderedDict", ")", "if", "not", "isinstance", "(", "val", "[", "index", "]", ",", "dict", ")", ":", "raise", "ValueError", "(", "\"Expected a list of dicts\"", ")", "return", "val" ]
[ 165, 0 ]
[ 204, 14 ]
python
en
['en', 'error', 'th']
False
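A brief usage sketch for build_list_of_dicts, mirroring the valid and invalid examples in its docstring. The import path is an assumption (this helper appears to come from the Cloudinary Python SDK's cloudinary.utils module).

from cloudinary.utils import build_list_of_dicts  # assumed import path

print(build_list_of_dicts({"k": "v"}))        # [{'k': 'v'}] - a dict is wrapped in a list
print(build_list_of_dicts('[{"k": "v"}]'))    # [OrderedDict([('k', 'v')])] - JSON string is decoded
print(build_list_of_dicts(None))              # [] - None becomes an empty list

try:
    build_list_of_dicts([123, None])          # items that are neither dicts nor JSON strings
except ValueError as e:
    print(e)                                  # Expected a list of dicts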
normalize_context_value
(value)
Escape "=" and "|" delimiter characters and json encode lists :param value: Value to escape :type value: int or str or list or tuple :return: The normalized value :rtype: str
Escape "=" and "|" delimiter characters and json encode lists
def normalize_context_value(value):
    """
    Escape "=" and "|" delimiter characters and json encode lists

    :param value: Value to escape
    :type value: int or str or list or tuple

    :return: The normalized value
    :rtype: str
    """
    if isinstance(value, (list, tuple)):
        value = json_encode(value)

    return str(value).replace("=", "\\=").replace("|", "\\|")
[ "def", "normalize_context_value", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "value", "=", "json_encode", "(", "value", ")", "return", "str", "(", "value", ")", ".", "replace", "(", "\"=\"", ",", "\"\\\\=\"", ")", ".", "replace", "(", "\"|\"", ",", "\"\\\\|\"", ")" ]
[ 226, 0 ]
[ 240, 61 ]
python
en
['en', 'error', 'th']
False
encode_context
(context)
Encode metadata fields based on incoming value. List and tuple values are encoded to json strings. :param context: dict of context to be encoded :return: a joined string of all keys and values properly escaped and separated by a pipe character
Encode metadata fields based on incoming value.
def encode_context(context):
    """
    Encode metadata fields based on incoming value.

    List and tuple values are encoded to json strings.

    :param context: dict of context to be encoded

    :return: a joined string of all keys and values properly escaped and separated by a pipe character
    """
    if not isinstance(context, dict):
        return context

    return "|".join(("{}={}".format(k, normalize_context_value(v))) for k, v in iteritems(context))
[ "def", "encode_context", "(", "context", ")", ":", "if", "not", "isinstance", "(", "context", ",", "dict", ")", ":", "return", "context", "return", "\"|\"", ".", "join", "(", "(", "\"{}={}\"", ".", "format", "(", "k", ",", "normalize_context_value", "(", "v", ")", ")", ")", "for", "k", ",", "v", "in", "iteritems", "(", "context", ")", ")" ]
[ 243, 0 ]
[ 256, 99 ]
python
en
['en', 'error', 'th']
False
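A short sketch of the context-encoding pair above (normalize_context_value and encode_context). The import path is an assumption based on the Cloudinary Python SDK.

from cloudinary.utils import encode_context  # assumed import path

context = {"alt": "my=image", "tags": ["a", "b"]}
# "=" and "|" inside values are escaped and list values are JSON-encoded,
# then key=value pairs are joined with "|":
print(encode_context(context))    # alt=my\=image|tags=["a","b"]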
json_encode
(value)
Converts value to a json encoded string :param value: value to be encoded :return: JSON encoded string
Converts value to a json encoded string
def json_encode(value):
    """
    Converts value to a json encoded string

    :param value: value to be encoded

    :return: JSON encoded string
    """
    return json.dumps(value, default=__json_serializer, separators=(',', ':'))
[ "def", "json_encode", "(", "value", ")", ":", "return", "json", ".", "dumps", "(", "value", ",", "default", "=", "__json_serializer", ",", "separators", "=", "(", "','", ",", "':'", ")", ")" ]
[ 259, 0 ]
[ 267, 78 ]
python
en
['en', 'error', 'th']
False
encode_date_to_usage_api_format
(date_obj)
Encodes date object to `dd-mm-yyyy` format string :param date_obj: datetime.date object to encode :return: Encoded date as a string
Encodes date object to `dd-mm-yyyy` format string
def encode_date_to_usage_api_format(date_obj):
    """
    Encodes date object to `dd-mm-yyyy` format string

    :param date_obj: datetime.date object to encode

    :return: Encoded date as a string
    """
    return date_obj.strftime('%d-%m-%Y')
[ "def", "encode_date_to_usage_api_format", "(", "date_obj", ")", ":", "return", "date_obj", ".", "strftime", "(", "'%d-%m-%Y'", ")" ]
[ 270, 0 ]
[ 278, 40 ]
python
en
['en', 'error', 'th']
False
patch_fetch_format
(options)
When upload type is fetch, remove the format options. In addition, set the fetch_format options to the format value unless it was already set. Mutates the options parameter! :param options: URL and transformation options
When upload type is fetch, remove the format options. In addition, set the fetch_format options to the format value unless it was already set. Mutates the options parameter!
def patch_fetch_format(options):
    """
    When upload type is fetch, remove the format options.
    In addition, set the fetch_format options to the format value unless it was already set.
    Mutates the options parameter!

    :param options: URL and transformation options
    """
    if options.get("type", "upload") != "fetch":
        return

    resource_format = options.pop("format", None)
    if "fetch_format" not in options:
        options["fetch_format"] = resource_format
[ "def", "patch_fetch_format", "(", "options", ")", ":", "if", "options", ".", "get", "(", "\"type\"", ",", "\"upload\"", ")", "!=", "\"fetch\"", ":", "return", "resource_format", "=", "options", ".", "pop", "(", "\"format\"", ",", "None", ")", "if", "\"fetch_format\"", "not", "in", "options", ":", "options", "[", "\"fetch_format\"", "]", "=", "resource_format" ]
[ 281, 0 ]
[ 294, 49 ]
python
en
['en', 'error', 'th']
False
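A small sketch of how patch_fetch_format mutates its options dict; the import path is assumed.

from cloudinary.utils import patch_fetch_format  # assumed import path

options = {"type": "fetch", "format": "png"}
patch_fetch_format(options)
print(options)    # {'type': 'fetch', 'fetch_format': 'png'} - "format" moved to "fetch_format"

options = {"type": "upload", "format": "png"}
patch_fetch_format(options)
print(options)    # {'type': 'upload', 'format': 'png'} - non-fetch options are left untouched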
chain_transformations
(options, transformations)
Helper function, allows chaining transformations to the end of transformations list The result of this function is an updated options parameter :param options: Original options :param transformations: Transformations to chain at the end :return: Resulting options
Helper function, allows chaining transformations to the end of transformations list
def chain_transformations(options, transformations):
    """
    Helper function, allows chaining transformations to the end of transformations list

    The result of this function is an updated options parameter

    :param options: Original options
    :param transformations: Transformations to chain at the end

    :return: Resulting options
    """
    transformations = copy.deepcopy(transformations)

    transformations = build_array(transformations)
    # preserve url options
    url_options = dict((o, options[o]) for o in __URL_KEYS if o in options)

    transformations.insert(0, options)

    url_options["transformation"] = transformations

    return url_options
[ "def", "chain_transformations", "(", "options", ",", "transformations", ")", ":", "transformations", "=", "copy", ".", "deepcopy", "(", "transformations", ")", "transformations", "=", "build_array", "(", "transformations", ")", "# preserve url options", "url_options", "=", "dict", "(", "(", "o", ",", "options", "[", "o", "]", ")", "for", "o", "in", "__URL_KEYS", "if", "o", "in", "options", ")", "transformations", ".", "insert", "(", "0", ",", "options", ")", "url_options", "[", "\"transformation\"", "]", "=", "transformations", "return", "url_options" ]
[ 460, 0 ]
[ 482, 22 ]
python
en
['en', 'error', 'th']
False
unsigned_download_url_prefix
(source, cloud_name, private_cdn, cdn_subdomain, secure_cdn_subdomain, cname, secure, secure_distribution)
cdn_subdomain and secure_cdn_subdomain 1) Customers in shared distribution (e.g. res.cloudinary.com) if cdn_domain is true uses res-[1-5].cloudinary.com for both http and https. Setting secure_cdn_subdomain to false disables this for https. 2) Customers with private cdn if cdn_domain is true uses cloudname-res-[1-5].cloudinary.com for http if secure_cdn_domain is true uses cloudname-res-[1-5].cloudinary.com for https (please contact support if you require this) 3) Customers with cname if cdn_domain is true uses a[1-5].cname for http. For https, uses the same naming scheme as 1 for shared distribution and as 2 for private distribution.
cdn_subdomain and secure_cdn_subdomain 1) Customers in shared distribution (e.g. res.cloudinary.com) if cdn_domain is true uses res-[1-5].cloudinary.com for both http and https. Setting secure_cdn_subdomain to false disables this for https. 2) Customers with private cdn if cdn_domain is true uses cloudname-res-[1-5].cloudinary.com for http if secure_cdn_domain is true uses cloudname-res-[1-5].cloudinary.com for https (please contact support if you require this) 3) Customers with cname if cdn_domain is true uses a[1-5].cname for http. For https, uses the same naming scheme as 1 for shared distribution and as 2 for private distribution.
def unsigned_download_url_prefix(source, cloud_name, private_cdn, cdn_subdomain, secure_cdn_subdomain, cname, secure,
                                 secure_distribution):
    """cdn_subdomain and secure_cdn_subdomain
    1) Customers in shared distribution (e.g. res.cloudinary.com)
       if cdn_domain is true uses res-[1-5].cloudinary.com for both http and https.
       Setting secure_cdn_subdomain to false disables this for https.
    2) Customers with private cdn
       if cdn_domain is true uses cloudname-res-[1-5].cloudinary.com for http
       if secure_cdn_domain is true uses cloudname-res-[1-5].cloudinary.com for https
       (please contact support if you require this)
    3) Customers with cname
       if cdn_domain is true uses a[1-5].cname for http.
       For https, uses the same naming scheme as 1 for shared distribution and as 2 for private distribution."""
    shared_domain = not private_cdn
    shard = __crc(source)
    if secure:
        if secure_distribution is None or secure_distribution == cloudinary.OLD_AKAMAI_SHARED_CDN:
            secure_distribution = cloud_name + "-res.cloudinary.com" \
                if private_cdn else cloudinary.SHARED_CDN

        shared_domain = shared_domain or secure_distribution == cloudinary.SHARED_CDN
        if secure_cdn_subdomain is None and shared_domain:
            secure_cdn_subdomain = cdn_subdomain

        if secure_cdn_subdomain:
            secure_distribution = re.sub('res.cloudinary.com', "res-" + shard + ".cloudinary.com",
                                         secure_distribution)

        prefix = "https://" + secure_distribution
    elif cname:
        subdomain = "a" + shard + "." if cdn_subdomain else ""
        prefix = "http://" + subdomain + cname
    else:
        subdomain = cloud_name + "-res" if private_cdn else "res"
        if cdn_subdomain:
            subdomain = subdomain + "-" + shard
        prefix = "http://" + subdomain + ".cloudinary.com"

    if shared_domain:
        prefix += "/" + cloud_name

    return prefix
[ "def", "unsigned_download_url_prefix", "(", "source", ",", "cloud_name", ",", "private_cdn", ",", "cdn_subdomain", ",", "secure_cdn_subdomain", ",", "cname", ",", "secure", ",", "secure_distribution", ")", ":", "shared_domain", "=", "not", "private_cdn", "shard", "=", "__crc", "(", "source", ")", "if", "secure", ":", "if", "secure_distribution", "is", "None", "or", "secure_distribution", "==", "cloudinary", ".", "OLD_AKAMAI_SHARED_CDN", ":", "secure_distribution", "=", "cloud_name", "+", "\"-res.cloudinary.com\"", "if", "private_cdn", "else", "cloudinary", ".", "SHARED_CDN", "shared_domain", "=", "shared_domain", "or", "secure_distribution", "==", "cloudinary", ".", "SHARED_CDN", "if", "secure_cdn_subdomain", "is", "None", "and", "shared_domain", ":", "secure_cdn_subdomain", "=", "cdn_subdomain", "if", "secure_cdn_subdomain", ":", "secure_distribution", "=", "re", ".", "sub", "(", "'res.cloudinary.com'", ",", "\"res-\"", "+", "shard", "+", "\".cloudinary.com\"", ",", "secure_distribution", ")", "prefix", "=", "\"https://\"", "+", "secure_distribution", "elif", "cname", ":", "subdomain", "=", "\"a\"", "+", "shard", "+", "\".\"", "if", "cdn_subdomain", "else", "\"\"", "prefix", "=", "\"http://\"", "+", "subdomain", "+", "cname", "else", ":", "subdomain", "=", "cloud_name", "+", "\"-res\"", "if", "private_cdn", "else", "\"res\"", "if", "cdn_subdomain", ":", "subdomain", "=", "subdomain", "+", "\"-\"", "+", "shard", "prefix", "=", "\"http://\"", "+", "subdomain", "+", "\".cloudinary.com\"", "if", "shared_domain", ":", "prefix", "+=", "\"/\"", "+", "cloud_name", "return", "prefix" ]
[ 646, 0 ]
[ 687, 17 ]
python
en
['en', 'gd', 'en']
True
cloudinary_scaled_url
(source, width, transformation, options)
Generates a cloudinary url scaled to specified width. :param source: The resource :param width: Width in pixels of the srcset item :param transformation: Custom transformation that overrides transformations provided in options :param options: A dict with additional options :return: Resulting URL of the item
Generates a cloudinary url scaled to specified width.
def cloudinary_scaled_url(source, width, transformation, options):
    """
    Generates a cloudinary url scaled to specified width.

    :param source: The resource
    :param width: Width in pixels of the srcset item
    :param transformation: Custom transformation that overrides transformations provided in options
    :param options: A dict with additional options

    :return: Resulting URL of the item
    """

    # preserve options from being destructed
    options = copy.deepcopy(options)

    if transformation:
        if isinstance(transformation, string_types):
            transformation = {"raw_transformation": transformation}

        # Remove all transformation related options
        options = dict((o, options[o]) for o in __URL_KEYS if o in options)
        options.update(transformation)

    scale_transformation = {"crop": "scale", "width": width}

    url_options = options
    patch_fetch_format(url_options)
    url_options = chain_transformations(url_options, scale_transformation)

    return cloudinary_url(source, **url_options)[0]
[ "def", "cloudinary_scaled_url", "(", "source", ",", "width", ",", "transformation", ",", "options", ")", ":", "# preserve options from being destructed", "options", "=", "copy", ".", "deepcopy", "(", "options", ")", "if", "transformation", ":", "if", "isinstance", "(", "transformation", ",", "string_types", ")", ":", "transformation", "=", "{", "\"raw_transformation\"", ":", "transformation", "}", "# Remove all transformation related options", "options", "=", "dict", "(", "(", "o", ",", "options", "[", "o", "]", ")", "for", "o", "in", "__URL_KEYS", "if", "o", "in", "options", ")", "options", ".", "update", "(", "transformation", ")", "scale_transformation", "=", "{", "\"crop\"", ":", "\"scale\"", ",", "\"width\"", ":", "width", "}", "url_options", "=", "options", "patch_fetch_format", "(", "url_options", ")", "url_options", "=", "chain_transformations", "(", "url_options", ",", "scale_transformation", ")", "return", "cloudinary_url", "(", "source", ",", "*", "*", "url_options", ")", "[", "0", "]" ]
[ 814, 0 ]
[ 843, 51 ]
python
en
['en', 'error', 'th']
False
smart_escape
(source, unsafe=r"([^a-zA-Z0-9_.\-\/:]+)")
Based on ruby's CGI::unescape. In addition does not escape / : :param source: Source string to escape :param unsafe: Unsafe characters :return: Escaped string
Based on ruby's CGI::unescape. In addition does not escape / :
def smart_escape(source, unsafe=r"([^a-zA-Z0-9_.\-\/:]+)"):
    """
    Based on ruby's CGI::unescape. In addition does not escape / :

    :param source: Source string to escape
    :param unsafe: Unsafe characters

    :return: Escaped string
    """
    def pack(m):
        return to_bytes('%' + "%".join(
            ["%02X" % x for x in struct.unpack('B' * len(m.group(1)), m.group(1))]
        ).upper())

    return to_string(re.sub(to_bytes(unsafe), pack, to_bytes(source)))
[ "def", "smart_escape", "(", "source", ",", "unsafe", "=", "r\"([^a-zA-Z0-9_.\\-\\/:]+)\"", ")", ":", "def", "pack", "(", "m", ")", ":", "return", "to_bytes", "(", "'%'", "+", "\"%\"", ".", "join", "(", "[", "\"%02X\"", "%", "x", "for", "x", "in", "struct", ".", "unpack", "(", "'B'", "*", "len", "(", "m", ".", "group", "(", "1", ")", ")", ",", "m", ".", "group", "(", "1", ")", ")", "]", ")", ".", "upper", "(", ")", ")", "return", "to_string", "(", "re", ".", "sub", "(", "to_bytes", "(", "unsafe", ")", ",", "pack", ",", "to_bytes", "(", "source", ")", ")", ")" ]
[ 846, 0 ]
[ 860, 70 ]
python
en
['en', 'error', 'th']
False
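A quick illustration of smart_escape (import path assumed): reserved characters are percent-encoded while "/" and ":" are left intact.

from cloudinary.utils import smart_escape  # assumed import path

print(smart_escape("folder/my image.jpg"))      # folder/my%20image.jpg
print(smart_escape("https://example.com/a b"))  # https://example.com/a%20b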
download_folder
(folder_path, **options)
Creates and returns a URL that when invoked creates an archive of a folder. :param folder_path: The full path from the root that is used to generate download url. :type folder_path: str :param options: Additional options. :type options: dict, optional :return: Signed URL to download the folder. :rtype: str
Creates and returns a URL that when invoked creates an archive of a folder. :param folder_path: The full path from the root that is used to generate download url. :type folder_path: str :param options: Additional options. :type options: dict, optional :return: Signed URL to download the folder. :rtype: str
def download_folder(folder_path, **options):
    """
    Creates and returns a URL that when invoked creates an archive of a folder.

    :param folder_path: The full path from the root that is used to generate download url.
    :type folder_path: str
    :param options: Additional options.
    :type options: dict, optional

    :return: Signed URL to download the folder.
    :rtype: str
    """
    options["prefixes"] = folder_path
    options.setdefault("resource_type", "all")

    return download_archive_url(**options)
[ "def", "download_folder", "(", "folder_path", ",", "*", "*", "options", ")", ":", "options", "[", "\"prefixes\"", "]", "=", "folder_path", "options", ".", "setdefault", "(", "\"resource_type\"", ",", "\"all\"", ")", "return", "download_archive_url", "(", "*", "*", "options", ")" ]
[ 921, 0 ]
[ 934, 42 ]
python
en
['en', 'error', 'th']
False
download_backedup_asset
(asset_id, version_id, **options)
The returned url allows downloading the backed-up asset based on the asset ID and the version ID. Parameters asset_id and version_id are returned with api.resource(<PUBLIC_ID1>, versions=True) API call. :param asset_id: The asset ID of the asset. :type asset_id: str :param version_id: The version ID of the asset. :type version_id: str :param options: Additional options. :type options: dict, optional :return: The signed URL for downloading backup version of the asset. :rtype: str
The returned url allows downloading the backed-up asset based on the asset ID and the version ID.
def download_backedup_asset(asset_id, version_id, **options):
    """
    The returned url allows downloading the backed-up asset based on the asset ID and the version ID.

    Parameters asset_id and version_id are returned with api.resource(<PUBLIC_ID1>, versions=True) API call.

    :param asset_id: The asset ID of the asset.
    :type asset_id: str
    :param version_id: The version ID of the asset.
    :type version_id: str
    :param options: Additional options.
    :type options: dict, optional

    :return: The signed URL for downloading backup version of the asset.
    :rtype: str
    """
    params = {
        "timestamp": options.get("timestamp", now()),
        "asset_id": asset_id,
        "version_id": version_id
    }

    cloudinary_params = sign_request(params, options)

    return base_api_url("download_backup", **options) + "?" + urlencode(bracketize_seq(cloudinary_params), True)
[ "def", "download_backedup_asset", "(", "asset_id", ",", "version_id", ",", "*", "*", "options", ")", ":", "params", "=", "{", "\"timestamp\"", ":", "options", ".", "get", "(", "\"timestamp\"", ",", "now", "(", ")", ")", ",", "\"asset_id\"", ":", "asset_id", ",", "\"version_id\"", ":", "version_id", "}", "cloudinary_params", "=", "sign_request", "(", "params", ",", "options", ")", "return", "base_api_url", "(", "\"download_backup\"", ",", "*", "*", "options", ")", "+", "\"?\"", "+", "urlencode", "(", "bracketize_seq", "(", "cloudinary_params", ")", ",", "True", ")" ]
[ 937, 0 ]
[ 959, 112 ]
python
en
['en', 'error', 'th']
False
build_single_eager
(options)
Builds a single eager transformation which consists of transformation and (optionally) format joined by "/" :param options: Options containing transformation parameters and (optionally) a "format" key format can be a string value (jpg, gif, etc) or can be set to "" (empty string). The latter leads to transformation ending with "/", which means "No extension, use original format" If format is not provided or set to None, only transformation is used (without the trailing "/") :return: Resulting eager transformation string
Builds a single eager transformation which consists of transformation and (optionally) format joined by "/"
def build_single_eager(options):
    """
    Builds a single eager transformation which consists of transformation and (optionally) format joined by "/"

    :param options: Options containing transformation parameters and (optionally) a "format" key
        format can be a string value (jpg, gif, etc) or can be set to "" (empty string).
        The latter leads to transformation ending with "/", which means "No extension, use original format"
        If format is not provided or set to None, only transformation is used (without the trailing "/")

    :return: Resulting eager transformation string
    """
    if isinstance(options, string_types):
        return options

    trans_str = generate_transformation_string(**options)[0]

    if not trans_str:
        return ""

    file_format = options.get("format")

    return trans_str + ("/" + file_format if file_format is not None else "")
[ "def", "build_single_eager", "(", "options", ")", ":", "if", "isinstance", "(", "options", ",", "string_types", ")", ":", "return", "options", "trans_str", "=", "generate_transformation_string", "(", "*", "*", "options", ")", "[", "0", "]", "if", "not", "trans_str", ":", "return", "\"\"", "file_format", "=", "options", ".", "get", "(", "\"format\"", ")", "return", "trans_str", "+", "(", "\"/\"", "+", "file_format", "if", "file_format", "is", "not", "None", "else", "\"\"", ")" ]
[ 1006, 0 ]
[ 1027, 77 ]
python
en
['en', 'error', 'th']
False
build_multi_and_sprite_params
(**options)
Build params for multi, download_multi, generate_sprite, and download_generated_sprite methods
Build params for multi, download_multi, generate_sprite, and download_generated_sprite methods
def build_multi_and_sprite_params(**options):
    """
    Build params for multi, download_multi, generate_sprite, and download_generated_sprite methods
    """
    tag = options.get("tag")
    urls = options.get("urls")
    if bool(tag) == bool(urls):
        raise ValueError("Either 'tag' or 'urls' parameter has to be set but not both")
    params = {
        "mode": options.get("mode"),
        "timestamp": now(),
        "async": options.get("async"),
        "notification_url": options.get("notification_url"),
        "tag": tag,
        "urls": urls,
        "transformation": generate_transformation_string(fetch_format=options.get("format"), **options)[0]
    }
    return params
[ "def", "build_multi_and_sprite_params", "(", "*", "*", "options", ")", ":", "tag", "=", "options", ".", "get", "(", "\"tag\"", ")", "urls", "=", "options", ".", "get", "(", "\"urls\"", ")", "if", "bool", "(", "tag", ")", "==", "bool", "(", "urls", ")", ":", "raise", "ValueError", "(", "\"Either 'tag' or 'urls' parameter has to be set but not both\"", ")", "params", "=", "{", "\"mode\"", ":", "options", ".", "get", "(", "\"mode\"", ")", ",", "\"timestamp\"", ":", "now", "(", ")", ",", "\"async\"", ":", "options", ".", "get", "(", "\"async\"", ")", ",", "\"notification_url\"", ":", "options", ".", "get", "(", "\"notification_url\"", ")", ",", "\"tag\"", ":", "tag", ",", "\"urls\"", ":", "urls", ",", "\"transformation\"", ":", "generate_transformation_string", "(", "fetch_format", "=", "options", ".", "get", "(", "\"format\"", ")", ",", "*", "*", "options", ")", "[", "0", "]", "}", "return", "params" ]
[ 1070, 0 ]
[ 1087, 17 ]
python
en
['en', 'error', 'th']
False
process_fps
(fps)
Serializes fps transformation parameter :param fps: A single number, a list of mixed type, a string, including open-ended and closed range values Examples: '24-29.97', 24, 24.973, '-24', [24, 29.97] :return: string
Serializes fps transformation parameter
def process_fps(fps):
    """
    Serializes fps transformation parameter

    :param fps: A single number, a list of mixed type, a string, including open-ended and closed range values
        Examples: '24-29.97', 24, 24.973, '-24', [24, 29.97]

    :return: string
    """
    if not isinstance(fps, (list, tuple)):
        return fps

    return "-".join(normalize_expression(f) for f in fps)
[ "def", "process_fps", "(", "fps", ")", ":", "if", "not", "isinstance", "(", "fps", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "fps", "return", "\"-\"", ".", "join", "(", "normalize_expression", "(", "f", ")", "for", "f", "in", "fps", ")" ]
[ 1277, 0 ]
[ 1289, 57 ]
python
en
['en', 'error', 'th']
False
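A few calls showing the fps serialization described above; the import path is an assumption.

from cloudinary.utils import process_fps  # assumed import path

print(process_fps(24))            # 24 - single values pass through unchanged
print(process_fps("24-29.97"))    # 24-29.97 - strings pass through unchanged
print(process_fps([24, 29.97]))   # 24-29.97 - lists are joined into a range expression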
process_ki
(ki)
Serializes keyframe_interval parameter :param ki: Keyframe interval. Should be either a string or a positive real number. :return: string
Serializes keyframe_interval parameter :param ki: Keyframe interval. Should be either a string or a positive real number. :return: string
def process_ki(ki):
    """
    Serializes keyframe_interval parameter
    :param ki: Keyframe interval. Should be either a string or a positive real number.
    :return: string
    """
    if ki is None:
        return None
    if isinstance(ki, string_types):
        return ki
    if not isinstance(ki, Number):
        raise ValueError("Keyframe interval should be a number or a string")
    if ki <= 0:
        raise ValueError("Keyframe interval should be greater than zero")
    return str(float(ki))
[ "def", "process_ki", "(", "ki", ")", ":", "if", "ki", "is", "None", ":", "return", "None", "if", "isinstance", "(", "ki", ",", "string_types", ")", ":", "return", "ki", "if", "not", "isinstance", "(", "ki", ",", "Number", ")", ":", "raise", "ValueError", "(", "\"Keyframe interval should be a number or a string\"", ")", "if", "ki", "<=", "0", ":", "raise", "ValueError", "(", "\"Keyframe interval should be greater than zero\"", ")", "return", "str", "(", "float", "(", "ki", ")", ")" ]
[ 1292, 0 ]
[ 1306, 25 ]
python
en
['en', 'error', 'th']
False
base64_encode_url
(url)
Returns the Base64-encoded version of url. The method tries to unquote the url first, so that an already quoted value is not escaped twice. :param str url: the url to encode. The value is URI-decoded and then re-encoded before converting to base64 representation
Returns the Base64-encoded version of url. The method tries to unquote the url first, so that an already quoted value is not escaped twice.
def base64_encode_url(url):
    """
    Returns the Base64-encoded version of url.
    The method tries to unquote the url first, so that an already quoted value is not escaped twice.

    :param str url: the url to encode. The value is URI-decoded and then re-encoded before
        converting to base64 representation
    """
    try:
        url = unquote(url)
    except Exception:
        pass

    url = smart_escape(url)
    b64 = base64.b64encode(url.encode('utf-8'))
    return b64.decode('ascii')
[ "def", "base64_encode_url", "(", "url", ")", ":", "try", ":", "url", "=", "unquote", "(", "url", ")", "except", "Exception", ":", "pass", "url", "=", "smart_escape", "(", "url", ")", "b64", "=", "base64", ".", "b64encode", "(", "url", ".", "encode", "(", "'utf-8'", ")", ")", "return", "b64", ".", "decode", "(", "'ascii'", ")" ]
[ 1356, 0 ]
[ 1373, 30 ]
python
en
['en', 'error', 'th']
False
base64url_encode
(data)
Url safe version of urlsafe_b64encode with stripped `=` sign at the end. :param data: input data :return: Base64 URL safe encoded string
Url safe version of urlsafe_b64encode with stripped `=` sign at the end.
def base64url_encode(data):
    """
    Url safe version of urlsafe_b64encode with stripped `=` sign at the end.

    :param data: input data

    :return: Base64 URL safe encoded string
    """
    return to_string(base64.urlsafe_b64encode(to_bytes(data)))
[ "def", "base64url_encode", "(", "data", ")", ":", "return", "to_string", "(", "base64", ".", "urlsafe_b64encode", "(", "to_bytes", "(", "data", ")", ")", ")" ]
[ 1376, 0 ]
[ 1384, 62 ]
python
en
['en', 'error', 'th']
False
encode_unicode_url
(url_str)
Quote and encode possible unicode url string (applicable for python2) :param url_str: Url string to encode :return: Encoded string
Quote and encode possible unicode url string (applicable for python2)
def encode_unicode_url(url_str):
    """
    Quote and encode possible unicode url string (applicable for python2)

    :param url_str: Url string to encode

    :return: Encoded string
    """
    if six.PY2:
        url_str = urllib.quote(url_str.encode('utf-8'), ":/?#[]@!$&'()*+,;=")

    return url_str
[ "def", "encode_unicode_url", "(", "url_str", ")", ":", "if", "six", ".", "PY2", ":", "url_str", "=", "urllib", ".", "quote", "(", "url_str", ".", "encode", "(", "'utf-8'", ")", ",", "\":/?#[]@!$&'()*+,;=\"", ")", "return", "url_str" ]
[ 1387, 0 ]
[ 1398, 18 ]
python
en
['en', 'error', 'th']
False
__json_serializer
(obj)
JSON serializer for objects not serializable by default json code
JSON serializer for objects not serializable by default json code
def __json_serializer(obj):
    """JSON serializer for objects not serializable by default json code"""
    if isinstance(obj, (datetime, date)):
        return obj.isoformat()
    raise TypeError("Object of type %s is not JSON serializable" % type(obj))
[ "def", "__json_serializer", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "(", "datetime", ",", "date", ")", ")", ":", "return", "obj", ".", "isoformat", "(", ")", "raise", "TypeError", "(", "\"Object of type %s is not JSON serializable\"", "%", "type", "(", "obj", ")", ")" ]
[ 1401, 0 ]
[ 1405, 77 ]
python
en
['en', 'en', 'en']
True
is_remote_url
(file)
Basic URL scheme check to define if it's remote URL
Basic URL scheme check to define if it's remote URL
def is_remote_url(file):
    """Basic URL scheme check to define if it's remote URL"""
    return isinstance(file, string_types) and re.match(REMOTE_URL_RE, file)
[ "def", "is_remote_url", "(", "file", ")", ":", "return", "isinstance", "(", "file", ",", "string_types", ")", "and", "re", ".", "match", "(", "REMOTE_URL_RE", ",", "file", ")" ]
[ 1408, 0 ]
[ 1410, 75 ]
python
de
['it', 'de', 'en']
False
file_io_size
(file_io)
Helper function for getting file-like object size(suitable for both files and streams) :param file_io: io.IOBase :return: size
Helper function for getting file-like object size(suitable for both files and streams)
def file_io_size(file_io):
    """
    Helper function for getting file-like object size(suitable for both files and streams)

    :param file_io: io.IOBase

    :return: size
    """
    initial_position = file_io.tell()
    file_io.seek(0, os.SEEK_END)
    size = file_io.tell()
    file_io.seek(initial_position, os.SEEK_SET)

    return size
[ "def", "file_io_size", "(", "file_io", ")", ":", "initial_position", "=", "file_io", ".", "tell", "(", ")", "file_io", ".", "seek", "(", "0", ",", "os", ".", "SEEK_END", ")", "size", "=", "file_io", ".", "tell", "(", ")", "file_io", ".", "seek", "(", "initial_position", ",", "os", ".", "SEEK_SET", ")", "return", "size" ]
[ 1413, 0 ]
[ 1426, 15 ]
python
en
['en', 'error', 'th']
False
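A minimal demonstration of file_io_size with an in-memory stream; the import path is assumed.

import io
from cloudinary.utils import file_io_size  # assumed import path

buf = io.BytesIO(b"hello world")
buf.seek(3)
print(file_io_size(buf))    # 11 - size of the whole stream
print(buf.tell())           # 3  - the original position is restored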
check_property_enabled
(f)
Used as a class method decorator to check whether class is enabled(self.enabled is True) :param f: function to call :return: None if not enabled, otherwise calls function f
Used as a class method decorator to check whether class is enabled(self.enabled is True)
def check_property_enabled(f):
    """
    Used as a class method decorator to check whether class is enabled(self.enabled is True)

    :param f: function to call

    :return: None if not enabled, otherwise calls function f
    """
    def wrapper(*args, **kwargs):
        if not args[0].enabled:
            return None

        return f(*args, **kwargs)

    return wrapper
[ "def", "check_property_enabled", "(", "f", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "args", "[", "0", "]", ".", "enabled", ":", "return", "None", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
[ 1429, 0 ]
[ 1442, 18 ]
python
en
['en', 'error', 'th']
False
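A sketch of the decorator in use; the Toggle class is purely illustrative and the import path is assumed.

from cloudinary.utils import check_property_enabled  # assumed import path

class Toggle:
    def __init__(self, enabled):
        self.enabled = enabled

    @check_property_enabled
    def report(self):
        return "report"

print(Toggle(True).report())     # report
print(Toggle(False).report())    # None - the wrapped method is skipped when disabled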
verify_api_response_signature
(public_id, version, signature, algorithm=None)
Verifies the authenticity of an API response signature :param public_id: The public id of the asset as returned in the API response :param version: The version of the asset as returned in the API response :param signature: Actual signature. Can be retrieved from the X-Cld-Signature header :param algorithm: Name of hashing algorithm to use for calculation of HMACs. By default uses `cloudinary.config().signature_algorithm` :return: Boolean result of the validation
Verifies the authenticity of an API response signature
def verify_api_response_signature(public_id, version, signature, algorithm=None):
    """
    Verifies the authenticity of an API response signature

    :param public_id: The public id of the asset as returned in the API response
    :param version: The version of the asset as returned in the API response
    :param signature: Actual signature. Can be retrieved from the X-Cld-Signature header
    :param algorithm: Name of hashing algorithm to use for calculation of HMACs.
                      By default uses `cloudinary.config().signature_algorithm`

    :return: Boolean result of the validation
    """
    if not cloudinary.config().api_secret:
        raise Exception('Api secret key is empty')

    parameters_to_sign = {'public_id': public_id,
                          'version': version}

    return signature == api_sign_request(
        parameters_to_sign,
        cloudinary.config().api_secret,
        algorithm or cloudinary.config().signature_algorithm
    )
[ "def", "verify_api_response_signature", "(", "public_id", ",", "version", ",", "signature", ",", "algorithm", "=", "None", ")", ":", "if", "not", "cloudinary", ".", "config", "(", ")", ".", "api_secret", ":", "raise", "Exception", "(", "'Api secret key is empty'", ")", "parameters_to_sign", "=", "{", "'public_id'", ":", "public_id", ",", "'version'", ":", "version", "}", "return", "signature", "==", "api_sign_request", "(", "parameters_to_sign", ",", "cloudinary", ".", "config", "(", ")", ".", "api_secret", ",", "algorithm", "or", "cloudinary", ".", "config", "(", ")", ".", "signature_algorithm", ")" ]
[ 1445, 0 ]
[ 1467, 5 ]
python
en
['en', 'error', 'th']
False
verify_notification_signature
(body, timestamp, signature, valid_for=7200, algorithm=None)
Verifies the authenticity of a notification signature :param body: Json of the request's body :param timestamp: Unix timestamp. Can be retrieved from the X-Cld-Timestamp header :param signature: Actual signature. Can be retrieved from the X-Cld-Signature header :param valid_for: The desired time in seconds for considering the request valid :param algorithm: Name of hashing algorithm to use for calculation of HMACs. By default uses `cloudinary.config().signature_algorithm` :return: Boolean result of the validation
Verifies the authenticity of a notification signature
def verify_notification_signature(body, timestamp, signature, valid_for=7200, algorithm=None):
    """
    Verifies the authenticity of a notification signature

    :param body: Json of the request's body
    :param timestamp: Unix timestamp. Can be retrieved from the X-Cld-Timestamp header
    :param signature: Actual signature. Can be retrieved from the X-Cld-Signature header
    :param valid_for: The desired time in seconds for considering the request valid
    :param algorithm: Name of hashing algorithm to use for calculation of HMACs.
                      By default uses `cloudinary.config().signature_algorithm`

    :return: Boolean result of the validation
    """
    if not cloudinary.config().api_secret:
        raise Exception('Api secret key is empty')

    if timestamp < time.time() - valid_for:
        return False

    if not isinstance(body, str):
        raise ValueError('Body should be type of string')

    return signature == compute_hex_hash(
        '{}{}{}'.format(body, timestamp, cloudinary.config().api_secret),
        algorithm or cloudinary.config().signature_algorithm)
[ "def", "verify_notification_signature", "(", "body", ",", "timestamp", ",", "signature", ",", "valid_for", "=", "7200", ",", "algorithm", "=", "None", ")", ":", "if", "not", "cloudinary", ".", "config", "(", ")", ".", "api_secret", ":", "raise", "Exception", "(", "'Api secret key is empty'", ")", "if", "timestamp", "<", "time", ".", "time", "(", ")", "-", "valid_for", ":", "return", "False", "if", "not", "isinstance", "(", "body", ",", "str", ")", ":", "raise", "ValueError", "(", "'Body should be type of string'", ")", "return", "signature", "==", "compute_hex_hash", "(", "'{}{}{}'", ".", "format", "(", "body", ",", "timestamp", ",", "cloudinary", ".", "config", "(", ")", ".", "api_secret", ")", ",", "algorithm", "or", "cloudinary", ".", "config", "(", ")", ".", "signature_algorithm", ")" ]
[ 1470, 0 ]
[ 1494, 61 ]
python
en
['en', 'error', 'th']
False
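A hedged end-to-end sketch of the notification check: the expected signature is the hex hash of body + timestamp + api_secret, as computed by compute_hex_hash from the earlier row. The secret and the import paths are illustrative assumptions.

import time

import cloudinary
from cloudinary.utils import compute_hex_hash, verify_notification_signature  # assumed import paths

cloudinary.config(api_secret="my_secret")    # illustrative secret, not a real credential

body = '{"notification_type": "upload"}'
timestamp = int(time.time())
signature = compute_hex_hash("{}{}{}".format(body, timestamp, "my_secret"))

print(verify_notification_signature(body, timestamp, signature))    # True while within valid_for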
get_http_connector
(conf, options)
Used to create http connector, depends on api_proxy configuration parameter :param conf: configuration object :param options: additional options :return: ProxyManager if api_proxy is set, otherwise PoolManager object
Used to create http connector, depends on api_proxy configuration parameter
def get_http_connector(conf, options):
    """
    Used to create http connector, depends on api_proxy configuration parameter

    :param conf: configuration object
    :param options: additional options

    :return: ProxyManager if api_proxy is set, otherwise PoolManager object
    """
    if conf.api_proxy:
        return ProxyManager(conf.api_proxy, **options)
    else:
        return PoolManager(**options)
[ "def", "get_http_connector", "(", "conf", ",", "options", ")", ":", "if", "conf", ".", "api_proxy", ":", "return", "ProxyManager", "(", "conf", ".", "api_proxy", ",", "*", "*", "options", ")", "else", ":", "return", "PoolManager", "(", "*", "*", "options", ")" ]
[ 1497, 0 ]
[ 1509, 37 ]
python
en
['en', 'error', 'th']
False
safe_cast
(val, casting_fn, default=None)
Attempts to cast a value to another using a given casting function Will return a default value if casting fails (configurable, defaults to None) :param val: The value to cast :param casting_fn: The casting function that will receive the value to cast :param default: The return value if casting fails :return: Result of casting the value or the value of the default parameter
Attempts to cast a value to another using a given casting function Will return a default value if casting fails (configurable, defaults to None)
def safe_cast(val, casting_fn, default=None):
    """
    Attempts to cast a value to another using a given casting function
    Will return a default value if casting fails (configurable, defaults to None)

    :param val: The value to cast
    :param casting_fn: The casting function that will receive the value to cast
    :param default: The return value if casting fails

    :return: Result of casting the value or the value of the default parameter
    """
    try:
        return casting_fn(val)
    except (ValueError, TypeError):
        return default
[ "def", "safe_cast", "(", "val", ",", "casting_fn", ",", "default", "=", "None", ")", ":", "try", ":", "return", "casting_fn", "(", "val", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "default" ]
[ 1518, 0 ]
[ 1532, 22 ]
python
en
['en', 'error', 'th']
False
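A short, self-contained usage sketch of the helper above; the inputs are arbitrary example values.

def safe_cast(val, casting_fn, default=None):
    # same logic as the record above
    try:
        return casting_fn(val)
    except (ValueError, TypeError):
        return default

print(safe_cast("3.14", float))           # 3.14
print(safe_cast("abc", int))              # None (cast failed, default returned)
print(safe_cast("7", int, default=0))     # 7
print(safe_cast(None, int, default=-1))   # -1 (TypeError is caught as well)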
compute_power_of_solutions
(template_eval, template_tasks, tier)
Compute power for each solution in eval stats. Solution power is how many tasks an action solves.
Compute power for each solution in eval stats.
def compute_power_of_solutions(template_eval, template_tasks, tier):
    """Compute power for each solution in eval stats.

    Solution power is how many tasks an action solves.
    """
    template_tasks = set(template_tasks)
    actions_on_tasks = template_eval['solution_power'][tier]['actions_on_tasks']
    task_ids = template_eval['solution_power'][tier]['task_ids']
    indicies = np.array(
        [i for i in range(len(task_ids)) if task_ids[i] in template_tasks])
    actions_template_tasks = actions_on_tasks.take(indicies, axis=1)
    solves = actions_template_tasks.sum(axis=1)
    solves.sort()
    return solves[::-1].tolist()
[ "def", "compute_power_of_solutions", "(", "template_eval", ",", "template_tasks", ",", "tier", ")", ":", "template_tasks", "=", "set", "(", "template_tasks", ")", "actions_on_tasks", "=", "template_eval", "[", "'solution_power'", "]", "[", "tier", "]", "[", "'actions_on_tasks'", "]", "task_ids", "=", "template_eval", "[", "'solution_power'", "]", "[", "tier", "]", "[", "'task_ids'", "]", "indicies", "=", "np", ".", "array", "(", "[", "i", "for", "i", "in", "range", "(", "len", "(", "task_ids", ")", ")", "if", "task_ids", "[", "i", "]", "in", "template_tasks", "]", ")", "actions_template_tasks", "=", "actions_on_tasks", ".", "take", "(", "indicies", ",", "axis", "=", "1", ")", "solves", "=", "actions_template_tasks", ".", "sum", "(", "axis", "=", "1", ")", "solves", ".", "sort", "(", ")", "return", "solves", "[", ":", ":", "-", "1", "]", ".", "tolist", "(", ")" ]
[ 47, 0 ]
[ 60, 32 ]
python
en
['en', 'en', 'en']
True
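A small synthetic illustration of what "solution power" means here: rows of actions_on_tasks are candidate actions, columns are tasks, and the column selection plus row sums mirror the record above. The toy matrix and task ids are invented for the example, not taken from any real evaluation file.

import numpy as np

# Toy stand-in for template_eval['solution_power'][tier]
actions_on_tasks = np.array([[1, 0, 1, 1],    # action 0 solves tasks 0, 2, 3
                             [0, 0, 1, 0],    # action 1 solves task 2 only
                             [1, 1, 1, 1]])   # action 2 solves everything
task_ids = ["00001:001", "00001:002", "00002:001", "00002:002"]
template_tasks = {"00001:001", "00001:002"}   # restrict to one template

indices = np.array([i for i, t in enumerate(task_ids) if t in template_tasks])
solves = actions_on_tasks.take(indices, axis=1).sum(axis=1)
solves.sort()
print(solves[::-1].tolist())  # [2, 1, 0] -> the best action solves both template tasks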
RequirementTracker.add
(self, req: InstallRequirement)
Add an InstallRequirement to build tracking.
Add an InstallRequirement to build tracking.
def add(self, req: InstallRequirement) -> None:
    """Add an InstallRequirement to build tracking.
    """
    assert req.link
    # Get the file to write information about this requirement.
    entry_path = self._entry_path(req.link)

    # Try reading from the file. If it exists and can be read from, a build
    # is already in progress, so a LookupError is raised.
    try:
        with open(entry_path) as fp:
            contents = fp.read()
    except FileNotFoundError:
        pass
    else:
        message = '{} is already being built: {}'.format(req.link, contents)
        raise LookupError(message)

    # If we're here, req should really not be building already.
    assert req not in self._entries

    # Start tracking this requirement.
    with open(entry_path, 'w', encoding="utf-8") as fp:
        fp.write(str(req))
    self._entries.add(req)

    logger.debug('Added %s to build tracker %r', req, self._root)
[ "def", "add", "(", "self", ",", "req", ":", "InstallRequirement", ")", "->", "None", ":", "assert", "req", ".", "link", "# Get the file to write information about this requirement.", "entry_path", "=", "self", ".", "_entry_path", "(", "req", ".", "link", ")", "# Try reading from the file. If it exists and can be read from, a build", "# is already in progress, so a LookupError is raised.", "try", ":", "with", "open", "(", "entry_path", ")", "as", "fp", ":", "contents", "=", "fp", ".", "read", "(", ")", "except", "FileNotFoundError", ":", "pass", "else", ":", "message", "=", "'{} is already being built: {}'", ".", "format", "(", "req", ".", "link", ",", "contents", ")", "raise", "LookupError", "(", "message", ")", "# If we're here, req should really not be building already.", "assert", "req", "not", "in", "self", ".", "_entries", "# Start tracking this requirement.", "with", "open", "(", "entry_path", ",", "'w'", ",", "encoding", "=", "\"utf-8\"", ")", "as", "fp", ":", "fp", ".", "write", "(", "str", "(", "req", ")", ")", "self", ".", "_entries", ".", "add", "(", "req", ")", "logger", ".", "debug", "(", "'Added %s to build tracker %r'", ",", "req", ",", "self", ".", "_root", ")" ]
[ 78, 4 ]
[ 106, 69 ]
python
en
['en', 'en', 'en']
True
RequirementTracker.remove
(self, req: InstallRequirement)
Remove an InstallRequirement from build tracking.
Remove an InstallRequirement from build tracking.
def remove(self, req: InstallRequirement) -> None:
    """Remove an InstallRequirement from build tracking.
    """
    assert req.link

    # Delete the created file and the corresponding entries.
    os.unlink(self._entry_path(req.link))
    self._entries.remove(req)

    logger.debug('Removed %s from build tracker %r', req, self._root)
[ "def", "remove", "(", "self", ",", "req", ":", "InstallRequirement", ")", "->", "None", ":", "assert", "req", ".", "link", "# Delete the created file and the corresponding entries.", "os", ".", "unlink", "(", "self", ".", "_entry_path", "(", "req", ".", "link", ")", ")", "self", ".", "_entries", ".", "remove", "(", "req", ")", "logger", ".", "debug", "(", "'Removed %s from build tracker %r'", ",", "req", ",", "self", ".", "_root", ")" ]
[ 108, 4 ]
[ 117, 73 ]
python
en
['en', 'en', 'en']
True
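The add/remove pair above implements a file-based "is this already being built?" guard. The sketch below is not pip's class; it is a stripped-down illustration of the same pattern with a plain string key standing in for req.link.

import os
import tempfile
import hashlib

class TinyBuildTracker:
    """Minimal illustration of file-based build tracking (not pip's implementation)."""

    def __init__(self, root):
        self._root = root
        self._entries = set()

    def _entry_path(self, key):
        return os.path.join(self._root, hashlib.sha224(key.encode()).hexdigest())

    def add(self, key):
        path = self._entry_path(key)
        if os.path.exists(path):                      # a build is already in progress
            raise LookupError(f"{key} is already being built")
        with open(path, "w", encoding="utf-8") as fp:
            fp.write(key)
        self._entries.add(key)

    def remove(self, key):
        os.unlink(self._entry_path(key))
        self._entries.remove(key)

tracker = TinyBuildTracker(tempfile.mkdtemp())
tracker.add("https://example.org/pkg-1.0.tar.gz")
try:
    tracker.add("https://example.org/pkg-1.0.tar.gz")  # second add raises LookupError
except LookupError as err:
    print(err)
finally:
    tracker.remove("https://example.org/pkg-1.0.tar.gz")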
identity_block
(input_tensor, kernel_size, filters, stage, block)
The identity block is the block that has no conv layer at shortcut. # Arguments input_tensor: input tensor kernel_size: default 3, the kernel size of middle conv layer at main path filters: list of integers, the filters of 3 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names # Returns Output tensor for the block.
The identity block is the block that has no conv layer at shortcut. # Arguments input_tensor: input tensor kernel_size: default 3, the kernel size of middle conv layer at main path filters: list of integers, the filters of 3 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names # Returns Output tensor for the block.
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = layers.Conv2D(filters1, (1, 1),
                      kernel_initializer='he_normal',
                      name=conv_name_base + '2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters2, kernel_size,
                      padding='same',
                      kernel_initializer='he_normal',
                      name=conv_name_base + '2b')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters3, (1, 1),
                      kernel_initializer='he_normal',
                      name=conv_name_base + '2c')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x
[ "def", "identity_block", "(", "input_tensor", ",", "kernel_size", ",", "filters", ",", "stage", ",", "block", ")", ":", "filters1", ",", "filters2", ",", "filters3", "=", "filters", "conv_name_base", "=", "'res'", "+", "str", "(", "stage", ")", "+", "block", "+", "'_branch'", "bn_name_base", "=", "'bn'", "+", "str", "(", "stage", ")", "+", "block", "+", "'_branch'", "x", "=", "layers", ".", "Conv2D", "(", "filters1", ",", "(", "1", ",", "1", ")", ",", "kernel_initializer", "=", "'he_normal'", ",", "name", "=", "conv_name_base", "+", "'2a'", ")", "(", "input_tensor", ")", "x", "=", "layers", ".", "BatchNormalization", "(", "axis", "=", "bn_axis", ",", "name", "=", "bn_name_base", "+", "'2a'", ")", "(", "x", ")", "x", "=", "layers", ".", "Activation", "(", "'relu'", ")", "(", "x", ")", "x", "=", "layers", ".", "Conv2D", "(", "filters2", ",", "kernel_size", ",", "padding", "=", "'same'", ",", "kernel_initializer", "=", "'he_normal'", ",", "name", "=", "conv_name_base", "+", "'2b'", ")", "(", "x", ")", "x", "=", "layers", ".", "BatchNormalization", "(", "axis", "=", "bn_axis", ",", "name", "=", "bn_name_base", "+", "'2b'", ")", "(", "x", ")", "x", "=", "layers", ".", "Activation", "(", "'relu'", ")", "(", "x", ")", "x", "=", "layers", ".", "Conv2D", "(", "filters3", ",", "(", "1", ",", "1", ")", ",", "kernel_initializer", "=", "'he_normal'", ",", "name", "=", "conv_name_base", "+", "'2c'", ")", "(", "x", ")", "x", "=", "layers", ".", "BatchNormalization", "(", "axis", "=", "bn_axis", ",", "name", "=", "bn_name_base", "+", "'2c'", ")", "(", "x", ")", "x", "=", "layers", ".", "add", "(", "[", "x", ",", "input_tensor", "]", ")", "x", "=", "layers", ".", "Activation", "(", "'relu'", ")", "(", "x", ")", "return", "x" ]
[ 7, 0 ]
[ 43, 10 ]
python
en
['en', 'en', 'en']
True
conv_block
(input_tensor, kernel_size, filters, stage, block, strides=(2, 2))
A block that has a conv layer at shortcut. # Arguments input_tensor: input tensor kernel_size: default 3, the kernel size of middle conv layer at main path filters: list of integers, the filters of 3 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names strides: Strides for the first conv layer in the block. # Returns Output tensor for the block. Note that from stage 3, the first conv layer at main path is with strides=(2, 2) And the shortcut should have strides=(2, 2) as well
A block that has a conv layer at shortcut. # Arguments input_tensor: input tensor kernel_size: default 3, the kernel size of middle conv layer at main path filters: list of integers, the filters of 3 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names strides: Strides for the first conv layer in the block. # Returns Output tensor for the block. Note that from stage 3, the first conv layer at main path is with strides=(2, 2) And the shortcut should have strides=(2, 2) as well
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    """A block that has a conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        strides: Strides for the first conv layer in the block.

    # Returns
        Output tensor for the block.

    Note that from stage 3, the first conv layer at main path is with strides=(2, 2)
    And the shortcut should have strides=(2, 2) as well
    """
    filters1, filters2, filters3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = layers.Conv2D(filters1, (1, 1), strides=strides,
                      kernel_initializer='he_normal',
                      name=conv_name_base + '2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters2, kernel_size, padding='same',
                      kernel_initializer='he_normal',
                      name=conv_name_base + '2b')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters3, (1, 1),
                      kernel_initializer='he_normal',
                      name=conv_name_base + '2c')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    shortcut = layers.Conv2D(filters3, (1, 1), strides=strides,
                             kernel_initializer='he_normal',
                             name=conv_name_base + '1')(input_tensor)
    shortcut = layers.BatchNormalization(
        axis=bn_axis, name=bn_name_base + '1')(shortcut)

    x = layers.add([x, shortcut])
    x = layers.Activation('relu')(x)
    return x
[ "def", "conv_block", "(", "input_tensor", ",", "kernel_size", ",", "filters", ",", "stage", ",", "block", ",", "strides", "=", "(", "2", ",", "2", ")", ")", ":", "filters1", ",", "filters2", ",", "filters3", "=", "filters", "conv_name_base", "=", "'res'", "+", "str", "(", "stage", ")", "+", "block", "+", "'_branch'", "bn_name_base", "=", "'bn'", "+", "str", "(", "stage", ")", "+", "block", "+", "'_branch'", "x", "=", "layers", ".", "Conv2D", "(", "filters1", ",", "(", "1", ",", "1", ")", ",", "strides", "=", "strides", ",", "kernel_initializer", "=", "'he_normal'", ",", "name", "=", "conv_name_base", "+", "'2a'", ")", "(", "input_tensor", ")", "x", "=", "layers", ".", "BatchNormalization", "(", "axis", "=", "bn_axis", ",", "name", "=", "bn_name_base", "+", "'2a'", ")", "(", "x", ")", "x", "=", "layers", ".", "Activation", "(", "'relu'", ")", "(", "x", ")", "x", "=", "layers", ".", "Conv2D", "(", "filters2", ",", "kernel_size", ",", "padding", "=", "'same'", ",", "kernel_initializer", "=", "'he_normal'", ",", "name", "=", "conv_name_base", "+", "'2b'", ")", "(", "x", ")", "x", "=", "layers", ".", "BatchNormalization", "(", "axis", "=", "bn_axis", ",", "name", "=", "bn_name_base", "+", "'2b'", ")", "(", "x", ")", "x", "=", "layers", ".", "Activation", "(", "'relu'", ")", "(", "x", ")", "x", "=", "layers", ".", "Conv2D", "(", "filters3", ",", "(", "1", ",", "1", ")", ",", "kernel_initializer", "=", "'he_normal'", ",", "name", "=", "conv_name_base", "+", "'2c'", ")", "(", "x", ")", "x", "=", "layers", ".", "BatchNormalization", "(", "axis", "=", "bn_axis", ",", "name", "=", "bn_name_base", "+", "'2c'", ")", "(", "x", ")", "shortcut", "=", "layers", ".", "Conv2D", "(", "filters3", ",", "(", "1", ",", "1", ")", ",", "strides", "=", "strides", ",", "kernel_initializer", "=", "'he_normal'", ",", "name", "=", "conv_name_base", "+", "'1'", ")", "(", "input_tensor", ")", "shortcut", "=", "layers", ".", "BatchNormalization", "(", "axis", "=", "bn_axis", ",", "name", "=", "bn_name_base", "+", "'1'", ")", "(", "shortcut", ")", "x", "=", "layers", ".", "add", "(", "[", "x", ",", "shortcut", "]", ")", "x", "=", "layers", ".", "Activation", "(", "'relu'", ")", "(", "x", ")", "return", "x" ]
[ 46, 0 ]
[ 96, 10 ]
python
en
['en', 'ca', 'en']
True
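Both blocks above rely on module-level layers and bn_axis globals. The sketch below supplies those and stacks one conv_block followed by identity_blocks, the way a ResNet stage is usually assembled; it assumes the two functions above are pasted into the same script so the globals resolve, and the input size and filter counts are arbitrary example values.

# Hedged usage sketch, assuming identity_block/conv_block as defined above.
from tensorflow.keras import layers, Model, Input

bn_axis = 3  # channels-last data format

inputs = Input(shape=(56, 56, 64))
x = conv_block(inputs, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
model = Model(inputs, x)
model.summary()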
register_handler
(handler)
Install application-specific WMF image handler. :param handler: Handler object.
Install application-specific WMF image handler.
def register_handler(handler):
    """
    Install application-specific WMF image handler.

    :param handler: Handler object.
    """
    global _handler
    _handler = handler
[ "def", "register_handler", "(", "handler", ")", ":", "global", "_handler", "_handler", "=", "handler" ]
[ 30, 0 ]
[ 37, 22 ]
python
en
['en', 'error', 'th']
False
LazySettings._setup
(self, name=None)
Load the settings module pointed to by the environment variable. This is used the first time settings are needed, if the user hasn't configured settings manually.
Load the settings module pointed to by the environment variable. This is used the first time settings are needed, if the user hasn't configured settings manually.
def _setup(self, name=None):
    """
    Load the settings module pointed to by the environment variable. This
    is used the first time settings are needed, if the user hasn't
    configured settings manually.
    """
    settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
    if not settings_module:
        desc = ("setting %s" % name) if name else "settings"
        raise ImproperlyConfigured(
            "Requested %s, but settings are not configured. "
            "You must either define the environment variable %s "
            "or call settings.configure() before accessing settings."
            % (desc, ENVIRONMENT_VARIABLE))

    self._wrapped = Settings(settings_module)
[ "def", "_setup", "(", "self", ",", "name", "=", "None", ")", ":", "settings_module", "=", "os", ".", "environ", ".", "get", "(", "ENVIRONMENT_VARIABLE", ")", "if", "not", "settings_module", ":", "desc", "=", "(", "\"setting %s\"", "%", "name", ")", "if", "name", "else", "\"settings\"", "raise", "ImproperlyConfigured", "(", "\"Requested %s, but settings are not configured. \"", "\"You must either define the environment variable %s \"", "\"or call settings.configure() before accessing settings.\"", "%", "(", "desc", ",", "ENVIRONMENT_VARIABLE", ")", ")", "self", ".", "_wrapped", "=", "Settings", "(", "settings_module", ")" ]
[ 53, 4 ]
[ 68, 49 ]
python
en
['en', 'error', 'th']
False
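The environment variable checked here is DJANGO_SETTINGS_MODULE; a typical entry point sets it before the first settings access, much like manage.py does. The project name below is a placeholder.

# Hedged sketch: making sure _setup() finds a settings module on first access.
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")  # placeholder project

from django.conf import settings
print(settings.DEBUG)  # first attribute access triggers LazySettings._setup()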
LazySettings.__getattr__
(self, name)
Return the value of a setting and cache it in self.__dict__.
Return the value of a setting and cache it in self.__dict__.
def __getattr__(self, name):
    """Return the value of a setting and cache it in self.__dict__."""
    if self._wrapped is empty:
        self._setup(name)
    val = getattr(self._wrapped, name)

    # Special case some settings which require further modification.
    # This is done here for performance reasons so the modified value is cached.
    if name in {'MEDIA_URL', 'STATIC_URL'} and val is not None:
        val = self._add_script_prefix(val)
    elif name == 'SECRET_KEY' and not val:
        raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")

    self.__dict__[name] = val
    return val
[ "def", "__getattr__", "(", "self", ",", "name", ")", ":", "if", "self", ".", "_wrapped", "is", "empty", ":", "self", ".", "_setup", "(", "name", ")", "val", "=", "getattr", "(", "self", ".", "_wrapped", ",", "name", ")", "# Special case some settings which require further modification.", "# This is done here for performance reasons so the modified value is cached.", "if", "name", "in", "{", "'MEDIA_URL'", ",", "'STATIC_URL'", "}", "and", "val", "is", "not", "None", ":", "val", "=", "self", ".", "_add_script_prefix", "(", "val", ")", "elif", "name", "==", "'SECRET_KEY'", "and", "not", "val", ":", "raise", "ImproperlyConfigured", "(", "\"The SECRET_KEY setting must not be empty.\"", ")", "self", ".", "__dict__", "[", "name", "]", "=", "val", "return", "val" ]
[ 78, 4 ]
[ 92, 18 ]
python
en
['en', 'en', 'en']
True
LazySettings.__setattr__
(self, name, value)
Set the value of setting. Clear all cached values if _wrapped changes (@override_settings does this) or clear single values when set.
Set the value of setting. Clear all cached values if _wrapped changes (
def __setattr__(self, name, value):
    """
    Set the value of setting. Clear all cached values if _wrapped changes
    (@override_settings does this) or clear single values when set.
    """
    if name == '_wrapped':
        self.__dict__.clear()
    else:
        self.__dict__.pop(name, None)
    super().__setattr__(name, value)
[ "def", "__setattr__", "(", "self", ",", "name", ",", "value", ")", ":", "if", "name", "==", "'_wrapped'", ":", "self", ".", "__dict__", ".", "clear", "(", ")", "else", ":", "self", ".", "__dict__", ".", "pop", "(", "name", ",", "None", ")", "super", "(", ")", ".", "__setattr__", "(", "name", ",", "value", ")" ]
[ 94, 4 ]
[ 103, 40 ]
python
en
['en', 'error', 'th']
False
LazySettings.__delattr__
(self, name)
Delete a setting and clear it from cache if needed.
Delete a setting and clear it from cache if needed.
def __delattr__(self, name):
    """Delete a setting and clear it from cache if needed."""
    super().__delattr__(name)
    self.__dict__.pop(name, None)
[ "def", "__delattr__", "(", "self", ",", "name", ")", ":", "super", "(", ")", ".", "__delattr__", "(", "name", ")", "self", ".", "__dict__", ".", "pop", "(", "name", ",", "None", ")" ]
[ 105, 4 ]
[ 108, 37 ]
python
en
['en', 'en', 'en']
True
LazySettings.configure
(self, default_settings=global_settings, **options)
Called to manually configure the settings. The 'default_settings' parameter sets where to retrieve any unspecified values from (its argument must support attribute access (__getattr__)).
Called to manually configure the settings. The 'default_settings' parameter sets where to retrieve any unspecified values from (its argument must support attribute access (__getattr__)).
def configure(self, default_settings=global_settings, **options):
    """
    Called to manually configure the settings. The 'default_settings'
    parameter sets where to retrieve any unspecified values from (its
    argument must support attribute access (__getattr__)).
    """
    if self._wrapped is not empty:
        raise RuntimeError('Settings already configured.')
    holder = UserSettingsHolder(default_settings)
    for name, value in options.items():
        if not name.isupper():
            raise TypeError('Setting %r must be uppercase.' % name)
        setattr(holder, name, value)
    self._wrapped = holder
[ "def", "configure", "(", "self", ",", "default_settings", "=", "global_settings", ",", "*", "*", "options", ")", ":", "if", "self", ".", "_wrapped", "is", "not", "empty", ":", "raise", "RuntimeError", "(", "'Settings already configured.'", ")", "holder", "=", "UserSettingsHolder", "(", "default_settings", ")", "for", "name", ",", "value", "in", "options", ".", "items", "(", ")", ":", "if", "not", "name", ".", "isupper", "(", ")", ":", "raise", "TypeError", "(", "'Setting %r must be uppercase.'", "%", "name", ")", "setattr", "(", "holder", ",", "name", ",", "value", ")", "self", ".", "_wrapped", "=", "holder" ]
[ 110, 4 ]
[ 123, 30 ]
python
en
['en', 'error', 'th']
False
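As an alternative to the environment variable, configure() builds a UserSettingsHolder explicitly. Setting names must be uppercase, as the check above enforces; the values below are arbitrary.

# Hedged sketch: manual configuration for scripts or tests without a settings module.
from django.conf import settings, global_settings

if not settings.configured:
    settings.configure(default_settings=global_settings, DEBUG=True, USE_TZ=True)

print(settings.DEBUG)        # True
# settings.configure(...)    # calling it a second time would raise RuntimeError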
LazySettings._add_script_prefix
(value)
Add SCRIPT_NAME prefix to relative paths. Useful when the app is being served at a subpath and manually prefixing subpath to STATIC_URL and MEDIA_URL in settings is inconvenient.
Add SCRIPT_NAME prefix to relative paths.
def _add_script_prefix(value):
    """
    Add SCRIPT_NAME prefix to relative paths.

    Useful when the app is being served at a subpath and manually prefixing
    subpath to STATIC_URL and MEDIA_URL in settings is inconvenient.
    """
    # Don't apply prefix to absolute paths and URLs.
    if value.startswith(('http://', 'https://', '/')):
        return value
    from django.urls import get_script_prefix
    return '%s%s' % (get_script_prefix(), value)
[ "def", "_add_script_prefix", "(", "value", ")", ":", "# Don't apply prefix to absolute paths and URLs.", "if", "value", ".", "startswith", "(", "(", "'http://'", ",", "'https://'", ",", "'/'", ")", ")", ":", "return", "value", "from", "django", ".", "urls", "import", "get_script_prefix", "return", "'%s%s'", "%", "(", "get_script_prefix", "(", ")", ",", "value", ")" ]
[ 126, 4 ]
[ 137, 52 ]
python
en
['en', 'error', 'th']
False
LazySettings.configured
(self)
Return True if the settings have already been configured.
Return True if the settings have already been configured.
def configured(self):
    """Return True if the settings have already been configured."""
    return self._wrapped is not empty
[ "def", "configured", "(", "self", ")", ":", "return", "self", ".", "_wrapped", "is", "not", "empty" ]
[ 140, 4 ]
[ 142, 41 ]
python
en
['en', 'en', 'en']
True
UserSettingsHolder.__init__
(self, default_settings)
Requests for configuration variables not in this class are satisfied from the module specified in default_settings (if possible).
Requests for configuration variables not in this class are satisfied from the module specified in default_settings (if possible).
def __init__(self, default_settings):
    """
    Requests for configuration variables not in this class are satisfied
    from the module specified in default_settings (if possible).
    """
    self.__dict__['_deleted'] = set()
    self.default_settings = default_settings
[ "def", "__init__", "(", "self", ",", "default_settings", ")", ":", "self", ".", "__dict__", "[", "'_deleted'", "]", "=", "set", "(", ")", "self", ".", "default_settings", "=", "default_settings" ]
[ 227, 4 ]
[ 233, 48 ]
python
en
['en', 'error', 'th']
False
HealpyModel.__init__
(self, network, input_shape=None, optimizer=None, save_dir=None, restore_point=None, summary_dir=None, init_step=0, is_chief=True)
Initializes a base model :param network: The underlying network of the model (expected to be either a tf.keras.Sequential or a subclass of it) :param input_shape: Optional input shape of the network, necessary if one wants to restore the model :param optimizer: Optimizer of the model, defaults to Adam :param save_dir: Directory where to save the weights and so, can be None :param restore_point: Possible restore point, either directory (of which the latest checkpoint will be chosen) or a checkpoint file :param summary_dir: Directory to save the summaries :param init_step: Initial step, defaults to 0 :param is_chief: Chief in case of distributed setting
Initializes a base model :param network: The underlying network of the model (expected to be either a tf.keras.Sequential or a subclass of it) :param input_shape: Optional input shape of the network, necessary if one wants to restore the model :param optimizer: Optimizer of the model, defaults to Adam :param save_dir: Directory where to save the weights and so, can be None :param restore_point: Possible restore point, either directory (of which the latest checkpoint will be chosen) or a checkpoint file :param summary_dir: Directory to save the summaries :param init_step: Initial step, defaults to 0 :param is_chief: Chief in case of distributed setting
def __init__(self, network, input_shape=None, optimizer=None, save_dir=None, restore_point=None,
             summary_dir=None, init_step=0, is_chief=True):
    """
    Initializes a base model

    :param network: The underlying network of the model (expected to be either a tf.keras.Sequential or a
                    subclass of it)
    :param input_shape: Optional input shape of the network, necessary if one wants to restore the model
    :param optimizer: Optimizer of the model, defaults to Adam
    :param save_dir: Directory where to save the weights and so, can be None
    :param restore_point: Possible restore point, either directory (of which the latest checkpoint will be
                          chosen) or a checkpoint file
    :param summary_dir: Directory to save the summaries
    :param init_step: Initial step, defaults to 0
    :param is_chief: Chief in case of distributed setting
    """
    # get the network
    self.network = network

    # save additional variables
    self.save_dir = save_dir
    self.restore_point = restore_point
    self.summary_dir = summary_dir
    self.input_shape = input_shape
    self.is_chief = is_chief
    self.init_step = init_step

    # set up save dir
    if self.save_dir is not None:
        os.makedirs(self.save_dir, exist_ok=True)

    # set up the optimizer
    if optimizer is None:
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
    else:
        self.optimizer = optimizer

    # We build the network
    if self.input_shape is not None:
        self.build_network(input_shape=self.input_shape)
        self.print_summary()

    # restore the weights
    if self.restore_point is not None:
        if input_shape is None:
            print("WARNING: Network weights can't be restored until <build_network> is called! Either call this "
                  "function manually or provide an input_shape during model initialization!")
        else:
            print(f"Restoring weights from {self.restore_point}...")
            self.restore_model()

    # set up summary writer
    if self.summary_dir is not None:
        # check if we are distributed
        try:
            _ = hvd.size()
            rank = f"_{hvd.rank()}"
        except ValueError:
            rank = ""
        # make a directory for the writer
        os.makedirs(self.summary_dir + rank, exist_ok=True)
        self.summary_writer = tf.summary.create_file_writer(summary_dir + rank)
    else:
        self.summary_writer = None

    # set the step
    self.train_step = tf.Variable(self.init_step, trainable=False, name="GlobalStep", dtype=tf.int64)
    tf.summary.experimental.set_step(self.train_step)

    # estimator
    self.estimator = None
[ "def", "__init__", "(", "self", ",", "network", ",", "input_shape", "=", "None", ",", "optimizer", "=", "None", ",", "save_dir", "=", "None", ",", "restore_point", "=", "None", ",", "summary_dir", "=", "None", ",", "init_step", "=", "0", ",", "is_chief", "=", "True", ")", ":", "# get the network", "self", ".", "network", "=", "network", "# save additional variables", "self", ".", "save_dir", "=", "save_dir", "self", ".", "restore_point", "=", "restore_point", "self", ".", "summary_dir", "=", "summary_dir", "self", ".", "input_shape", "=", "input_shape", "self", ".", "is_chief", "=", "is_chief", "self", ".", "init_step", "=", "init_step", "# set up save dir", "if", "self", ".", "save_dir", "is", "not", "None", ":", "os", ".", "makedirs", "(", "self", ".", "save_dir", ",", "exist_ok", "=", "True", ")", "# set up the optimizer", "if", "optimizer", "is", "None", ":", "self", ".", "optimizer", "=", "tf", ".", "keras", ".", "optimizers", ".", "Adam", "(", "learning_rate", "=", "1e-4", ")", "else", ":", "self", ".", "optimizer", "=", "optimizer", "# We build the network", "if", "self", ".", "input_shape", "is", "not", "None", ":", "self", ".", "build_network", "(", "input_shape", "=", "self", ".", "input_shape", ")", "self", ".", "print_summary", "(", ")", "# restore the weights", "if", "self", ".", "restore_point", "is", "not", "None", ":", "if", "input_shape", "is", "None", ":", "print", "(", "\"WARNING: Network weights can't be restored until <build_network> is called! Either call this \"", "\"function manually or provide an inpute_shape during model initialization!\"", ")", "else", ":", "print", "(", "f\"Restoring weights from {self.restore_point}...\"", ")", "self", ".", "restore_model", "(", ")", "# set up summary writer", "if", "self", ".", "summary_dir", "is", "not", "None", ":", "# check if we are distributed", "try", ":", "_", "=", "hvd", ".", "size", "(", ")", "rank", "=", "f\"_{hvd.rank()}\"", "except", "ValueError", ":", "rank", "=", "\"\"", "# make a directory for the writer", "os", ".", "makedirs", "(", "self", ".", "summary_dir", "+", "rank", ",", "exist_ok", "=", "True", ")", "self", ".", "summary_writer", "=", "tf", ".", "summary", ".", "create_file_writer", "(", "summary_dir", "+", "rank", ")", "else", ":", "self", ".", "summary_writer", "=", "None", "# set the step", "self", ".", "train_step", "=", "tf", ".", "Variable", "(", "self", ".", "init_step", ",", "trainable", "=", "False", ",", "name", "=", "\"GlobalStep\"", ",", "dtype", "=", "tf", ".", "int64", ")", "tf", ".", "summary", ".", "experimental", ".", "set_step", "(", "self", ".", "train_step", ")", "# estimator", "self", ".", "estimator", "=", "None" ]
[ 19, 4 ]
[ 90, 29 ]
python
en
['en', 'error', 'th']
False
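A hypothetical construction call following the constructor signature documented above. The network, shapes and directories are placeholders, and whether a plain tf.keras.Sequential satisfies the rest of the class (e.g. the indices_in attribute used elsewhere) is not guaranteed by this record.

# Hedged sketch: instantiating the model wrapper per the documented signature.
import tensorflow as tf

net = tf.keras.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(64, activation="relu"),
    tf.keras.layers.Dense(2),
])

model = HealpyModel(network=net,
                    input_shape=(None, 12 * 16 ** 2, 1),  # (batch, n_pix, n_channels), placeholder values
                    save_dir="./checkpoints",             # placeholder paths
                    summary_dir="./summaries",
                    init_step=0)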
HealpyModel.clean_summaries
(self, force=False)
Removes redundant summary directories... :param force: force the removal even if the worker is not chief
Removes redundant summary directories... :param force: force the removal even if the worker is not chief
def clean_summaries(self, force=False):
    """
    Removes redundant summary directories...

    :param force: force the removal even if the worker is not chief
    """
    if self.is_chief or force:
        try:
            num_workers = hvd.size()
        except ValueError:
            print("Nothing to clean, skipping...")
            num_workers = 1
        for i in range(1, num_workers):
            rmtree(self.summary_dir + f"_{i}")
    else:
        print("Trying to call <clean_summaries> from a worker that is not chief, "
              "skipping to avoid multiple workers doing the same thing... "
              "If you are sure that this should happen set force=True when calling this function.")
[ "def", "clean_summaries", "(", "self", ",", "force", "=", "False", ")", ":", "if", "self", ".", "is_chief", "or", "force", ":", "try", ":", "num_workers", "=", "hvd", ".", "size", "(", ")", "except", "ValueError", ":", "print", "(", "\"Nothing to clean, skipping...\"", ")", "num_workers", "=", "1", "for", "i", "in", "range", "(", "1", ",", "num_workers", ")", ":", "rmtree", "(", "self", ".", "summary_dir", "+", "f\"_{i}\"", ")", "else", ":", "print", "(", "\"Trying to call <clean_summaries> from a worker that is not chief, \"", "\"skipping to avoid multiple worker doing the same thing... \"", "\"If you are sure that this should happen set force=True when calling this function.\"", ")" ]
[ 92, 4 ]
[ 109, 103 ]
python
en
['en', 'error', 'th']
False
HealpyModel.update_step
(self)
increments the train step of the model by 1
increments the train step of the model by 1
def update_step(self):
    """
    increments the train step of the model by 1
    """
    self.train_step.assign(self.train_step + 1)
[ "def", "update_step", "(", "self", ")", ":", "self", ".", "train_step", ".", "assign", "(", "self", ".", "train_step", "+", "1", ")" ]
[ 111, 4 ]
[ 115, 51 ]
python
en
['en', 'error', 'th']
False
HealpyModel.set_step
(self, step)
Sets the current training step of the model to a given value :param step: The new step (int)
Sets the current training step of the model to a given value :param step: The new step (int)
def set_step(self, step):
    """
    Sets the current training step of the model to a given value

    :param step: The new step (int)
    """
    self.train_step.assign(step)
[ "def", "set_step", "(", "self", ",", "step", ")", ":", "self", ".", "train_step", ".", "assign", "(", "step", ")" ]
[ 117, 4 ]
[ 122, 36 ]
python
en
['en', 'error', 'th']
False
HealpyModel.restore_model
(self, restore_point=None)
Restores the weights of the network given a restore point :param restore_point: either a directory that includes checkpoints (of which the most recent will be chosen) or the path to a specific checkpoint to restore from, defaults to the value given at init of the model
Restores the weights of the network given a restore point :param restore_point: either a directory that includes checkpoints (of which the most recent will be chosen) or the path to a specific checkpoint to restore from, defaults to the value given at init of the model
def restore_model(self, restore_point=None):
    """
    Restores the weights of the network given a restore point

    :param restore_point: either a directory that includes checkpoints (of which the most recent will be chosen)
                          or the path to a specific checkpoint to restore from, defaults to the value given at
                          init of the model
    """
    if restore_point is None and self.restore_point is None:
        raise ValueError("No restore point was provided in the initialization or as argument when this function "
                         "was called.")

    # get the right point
    if restore_point is None:
        restore_point = self.restore_point

    # get the checkpoint
    if os.path.isdir(restore_point):
        checkpoint = tf.train.latest_checkpoint(restore_point)
    else:
        checkpoint = restore_point

    # restore
    self.network.load_weights(checkpoint)

    # check if we need to broadcast
    try:
        num_workers = hvd.size()
        hvd.broadcast_variables(self.network.weights, root_rank=0)
    except ValueError:
        num_workers = 1

    if self.is_chief:
        print(f"Successfully restored weights for {num_workers} workers...")
[ "def", "restore_model", "(", "self", ",", "restore_point", "=", "None", ")", ":", "if", "restore_point", "is", "None", "and", "self", ".", "restore_point", "is", "None", ":", "raise", "ValueError", "(", "\"No restore point was provided in the initialization or as argument when this function \"", "\"was called.\"", ")", "# get the right point", "if", "restore_point", "is", "None", ":", "restore_point", "=", "self", ".", "restore_point", "# get the checkpoint", "if", "os", ".", "path", ".", "isdir", "(", "restore_point", ")", ":", "checkpoint", "=", "tf", ".", "train", ".", "latest_checkpoint", "(", "restore_point", ")", "else", ":", "checkpoint", "=", "restore_point", "# restore", "self", ".", "network", ".", "load_weights", "(", "checkpoint", ")", "# check if we need to broadcast", "try", ":", "num_workers", "=", "hvd", ".", "size", "(", ")", "hvd", ".", "broadcast_variables", "(", "self", ".", "network", ".", "weights", ",", "root_rank", "=", "0", ")", "except", "ValueError", ":", "num_workers", "=", "1", "if", "self", ".", "is_chief", ":", "print", "(", "f\"Sucessfully resteored weights for {num_workers} workers...\"", ")" ]
[ 124, 4 ]
[ 156, 80 ]
python
en
['en', 'error', 'th']
False
HealpyModel.save_model
(self, save_dir=None, force=False)
Saves the weights of the model into a given directory, this function won't do anything if the model is not chief :param save_dir: the path where to save the weights, defaults to the value at init of model :param force: write the checkpoint even if the model is not chief, this can lead to errors if multiple workers write in the same directory concurrently
Saves the weights of the model into a given directory, this function won't do anything if the model is not chief :param save_dir: the path where to save the weights, defaults to the value at init of model :param force: write the checkpoint even if the model is not chief, this can lead to errors if multiple workers write in the same directory concurrently
def save_model(self, save_dir=None, force=False):
    """
    Saves the weights of the model into a given directory, this function won't do anything if the model is
    not chief

    :param save_dir: the path where to save the weights, defaults to the value at init of model
    :param force: write the checkpoint even if the model is not chief, this can lead to errors if multiple
                  workers write in the same directory concurrently
    """
    if self.is_chief or force:
        if save_dir is None and self.save_dir is None:
            raise ValueError("No save directory was declared during the init of the model or in this function "
                             "call.")

        # get and create
        if save_dir is None:
            save_dir = self.save_dir
        os.makedirs(save_dir, exist_ok=True)

        # save
        check_point = os.path.join(save_dir, "checkpoint-%i" % (self.train_step.value()))
        self.network.save_weights(check_point)
    else:
        print("Trying to write a checkpoint with a model that is not chief, skipping... "
              "If you are sure that this should happen set force=True when calling this function.")
[ "def", "save_model", "(", "self", ",", "save_dir", "=", "None", ",", "force", "=", "False", ")", ":", "if", "self", ".", "is_chief", "or", "force", ":", "if", "save_dir", "is", "None", "and", "self", ".", "save_dir", "is", "None", ":", "raise", "ValueError", "(", "\"No save directory was declared during the init of the model or in this function \"", "\"call.\"", ")", "# get and create", "if", "save_dir", "is", "None", ":", "save_dir", "=", "self", ".", "save_dir", "os", ".", "makedirs", "(", "save_dir", ",", "exist_ok", "=", "True", ")", "# save", "check_point", "=", "os", ".", "path", ".", "join", "(", "save_dir", ",", "\"checkpoint-%i\"", "%", "(", "self", ".", "train_step", ".", "value", "(", ")", ")", ")", "self", ".", "network", ".", "save_weights", "(", "check_point", ")", "else", ":", "print", "(", "\"Trying to write a checkpoint with a model that is not chief, skipping... \"", "\"If you are sure that this should happen set force=True when calling this function.\"", ")" ]
[ 158, 4 ]
[ 181, 103 ]
python
en
['en', 'error', 'th']
False
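Putting save_model together with the step counter maintained by update_step/set_step gives a periodic-checkpoint pattern that only the chief worker actually writes. The interval and loop below are illustrative and reuse the placeholder model object from the earlier construction sketch.

# Hedged sketch: periodic checkpointing driven by the model's own step counter.
save_every = 1000  # illustrative interval

for batch in range(10_000):                  # stand-in for a real data loop
    # ... one training step here (see base_train_step below) ...
    if int(model.train_step.numpy()) % save_every == 0:
        model.save_model()                   # no-op on non-chief workers unless force=True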
HealpyModel.build_network
(self, input_shape)
Builds the internal HealpyGCNN with a given input shape :param input_shape: input shape of the network
Builds the internal HealpyGCNN with a given input shape :param input_shape: input shape of the network
def build_network(self, input_shape):
    """
    Builds the internal HealpyGCNN with a given input shape

    :param input_shape: input shape of the network
    """
    self.network.build(input_shape=input_shape)
[ "def", "build_network", "(", "self", ",", "input_shape", ")", ":", "self", ".", "network", ".", "build", "(", "input_shape", "=", "input_shape", ")" ]
[ 183, 4 ]
[ 188, 51 ]
python
en
['en', 'error', 'th']
False
HealpyModel.print_summary
(self, **kwargs)
Prints the summary of the internal network :param kwargs: passed to HealpyGCNN.summary
Prints the summary of the internal network :param kwargs: passed to HealpyGCNN.summary
def print_summary(self, **kwargs):
    """
    Prints the summary of the internal network

    :param kwargs: passed to HealpyGCNN.summary
    """
    self.network.summary(**kwargs)
[ "def", "print_summary", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "network", ".", "summary", "(", "*", "*", "kwargs", ")" ]
[ 190, 4 ]
[ 195, 38 ]
python
en
['en', 'error', 'th']
False
HealpyModel.base_train_step
(self, input_tensor, loss_function, input_labels=None, clip_by_value=None, clip_by_norm=None, clip_by_global_norm=None, training=True, num_workers=None, train_indices=None, return_loss=False)
A base train step: given a loss function and an input tensor it evaluates the network and performs a single gradient descent step, if multiple clippings are requested the order will be: * by value * by norm * by global norm :param input_tensor: The input of the network :param loss_function: The loss function, a callable that takes predictions of the network as input and, if provided, the input_labels :param input_labels: Labels of the input_tensor :param clip_by_value: Clip the gradients by given 1d array of values into the interval [value[0], value[1]], defaults to no clipping :param clip_by_norm: Clip the gradients by norm, defaults to no clipping :param clip_by_global_norm: Clip the gradients by global norm, defaults to no clipping :param training: whether we are training or not (e.g. matters for batch norm), should be true here :param num_workers: how many replicates are working on the same thing, None means no distribution :param train_indices: A list of indices, if not None only [trainable_variables[i] for i in train_indices] will be trained :param return_loss: If true, the function returns the actual loss value, otherwise it returns nothing
A base train step: given a loss function and an input tensor it evaluates the network and performs a single gradient descent step, if multiple clippings are requested the order will be: * by value * by norm * by global norm :param input_tensor: The input of the network :param loss_function: The loss function, a callable that takes predictions of the network as input and, if provided, the input_labels :param input_labels: Labels of the input_tensor :param clip_by_value: Clip the gradients by given 1d array of values into the interval [value[0], value[1]], defaults to no clipping :param clip_by_norm: Clip the gradients by norm, defaults to no clipping :param clip_by_global_norm: Clip the gradients by global norm, defaults to no clipping :param training: whether we are training or not (e.g. matters for batch norm), should be true here :param num_workers: how many replicates are working on the same thing, None means no distribution :param train_indices: A list of indices, if not None only [trainable_variables[i] for i in train_indices] will be trained :param return_loss: If true, the function returns the actual loss value, otherwise it returns nothing
def base_train_step(self, input_tensor, loss_function, input_labels=None, clip_by_value=None,
                    clip_by_norm=None, clip_by_global_norm=None, training=True, num_workers=None,
                    train_indices=None, return_loss=False):
    """
    A base train step: given a loss function and an input tensor it evaluates the network and performs a single
    gradient descent step, if multiple clippings are requested the order will be:

    * by value
    * by norm
    * by global norm

    :param input_tensor: The input of the network
    :param loss_function: The loss function, a callable that takes predictions of the network as input and,
                          if provided, the input_labels
    :param input_labels: Labels of the input_tensor
    :param clip_by_value: Clip the gradients by given 1d array of values into the interval [value[0], value[1]],
                          defaults to no clipping
    :param clip_by_norm: Clip the gradients by norm, defaults to no clipping
    :param clip_by_global_norm: Clip the gradients by global norm, defaults to no clipping
    :param training: whether we are training or not (e.g. matters for batch norm), should be true here
    :param num_workers: how many replicates are working on the same thing, None means no distribution
    :param train_indices: A list of indices, if not None only [trainable_variables[i] for i in train_indices]
                          will be trained
    :param return_loss: If true, the function returns the actual loss value, otherwise it returns nothing
    """
    if train_indices is None:
        train_variables = self.network.trainable_variables
    else:
        train_variables = [self.network.trainable_variables[i] for i in train_indices]

    with tf.GradientTape() as tape:
        predictions = self.network(input_tensor, training=training)
        if input_labels is None:
            loss_val = loss_function(predictions)
        else:
            loss_val = loss_function(predictions, input_labels)

    if self.summary_writer is not None:
        with self.summary_writer.as_default():
            tf.summary.scalar("Loss", loss_val)

    # update the step
    self.update_step()

    if num_workers is not None:
        # Horovod: add Horovod Distributed GradientTape.
        tape = hvd.DistributedGradientTape(tape)

    # get the gradients
    gradients = tape.gradient(loss_val, train_variables)

    # clip
    if clip_by_value is not None:
        gradients = [tf.clip_by_value(g, clip_by_value[0], clip_by_value[1]) for g in gradients]
    if clip_by_norm is not None:
        gradients = [tf.clip_by_norm(g, clip_by_norm) for g in gradients]

    # get the global norm
    glob_norm = tf.linalg.global_norm(gradients)
    if self.summary_writer is not None:
        with self.summary_writer.as_default():
            tf.summary.scalar("Global_Grad_Norm", glob_norm)

    if clip_by_global_norm is not None:
        gradients, _ = tf.clip_by_global_norm(gradients, clip_by_global_norm, use_norm=glob_norm)

    # apply gradients
    self.optimizer.apply_gradients(zip(gradients, train_variables))

    if return_loss:
        return loss_val
[ "def", "base_train_step", "(", "self", ",", "input_tensor", ",", "loss_function", ",", "input_labels", "=", "None", ",", "clip_by_value", "=", "None", ",", "clip_by_norm", "=", "None", ",", "clip_by_global_norm", "=", "None", ",", "training", "=", "True", ",", "num_workers", "=", "None", ",", "train_indices", "=", "None", ",", "return_loss", "=", "False", ")", ":", "if", "train_indices", "is", "None", ":", "train_variables", "=", "self", ".", "network", ".", "trainable_variables", "else", ":", "train_variables", "=", "[", "self", ".", "network", ".", "trainable_variables", "[", "i", "]", "for", "i", "in", "train_indices", "]", "with", "tf", ".", "GradientTape", "(", ")", "as", "tape", ":", "predictions", "=", "self", ".", "network", "(", "input_tensor", ",", "training", "=", "training", ")", "if", "input_labels", "is", "None", ":", "loss_val", "=", "loss_function", "(", "predictions", ")", "else", ":", "loss_val", "=", "loss_function", "(", "predictions", ",", "input_labels", ")", "if", "self", ".", "summary_writer", "is", "not", "None", ":", "with", "self", ".", "summary_writer", ".", "as_default", "(", ")", ":", "tf", ".", "summary", ".", "scalar", "(", "\"Loss\"", ",", "loss_val", ")", "# update the step", "self", ".", "update_step", "(", ")", "if", "num_workers", "is", "not", "None", ":", "# Horovod: add Horovod Distributed GradientTape.", "tape", "=", "hvd", ".", "DistributedGradientTape", "(", "tape", ")", "# get the gradients", "gradients", "=", "tape", ".", "gradient", "(", "loss_val", ",", "train_variables", ")", "# clip", "if", "clip_by_value", "is", "not", "None", ":", "gradients", "=", "[", "tf", ".", "clip_by_value", "(", "g", ",", "clip_by_value", "[", "0", "]", ",", "clip_by_value", "[", "1", "]", ")", "for", "g", "in", "gradients", "]", "if", "clip_by_norm", "is", "not", "None", ":", "gradients", "=", "[", "tf", ".", "clip_by_norm", "(", "g", ",", "clip_by_norm", ")", "for", "g", "in", "gradients", "]", "# get the global norm", "glob_norm", "=", "tf", ".", "linalg", ".", "global_norm", "(", "gradients", ")", "if", "self", ".", "summary_writer", "is", "not", "None", ":", "with", "self", ".", "summary_writer", ".", "as_default", "(", ")", ":", "tf", ".", "summary", ".", "scalar", "(", "\"Global_Grad_Norm\"", ",", "glob_norm", ")", "if", "clip_by_global_norm", "is", "not", "None", ":", "gradients", ",", "_", "=", "tf", ".", "clip_by_global_norm", "(", "gradients", ",", "clip_by_global_norm", ",", "use_norm", "=", "glob_norm", ")", "# apply gradients", "self", ".", "optimizer", ".", "apply_gradients", "(", "zip", "(", "gradients", ",", "train_variables", ")", ")", "if", "return_loss", ":", "return", "loss_val" ]
[ 197, 4 ]
[ 261, 27 ]
python
en
['en', 'error', 'th']
False
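A hedged sketch of driving base_train_step directly with a standard supervised loss. The data, loss and clipping values are placeholders, and it assumes the model wrapper built in the earlier sketch, whose network output shape matches the labels.

# Hedged sketch: one gradient step with a user-supplied loss and global-norm clipping.
import tensorflow as tf

mse = tf.keras.losses.MeanSquaredError()

def loss_fn(predictions, labels):
    return mse(labels, predictions)

x = tf.random.normal((8, 12 * 16 ** 2, 1))   # placeholder batch matching the input shape used above
y = tf.random.normal((8, 2))                 # placeholder labels

loss = model.base_train_step(input_tensor=x,
                             loss_function=loss_fn,
                             input_labels=y,
                             clip_by_global_norm=5.0,
                             return_loss=True)
print(float(loss))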
HealpyModel.setup_delta_loss_step
(self, batch_size, off_sets, n_points=1, n_channels=1, n_output=None, jac_weight=0.0, force_params=None, force_weight=1.0, jac_cond_weight=None, use_log_det=True, no_correlations=False, tikhonov_regu=False, weights=None, eps=1e-32, n_partial=None, clip_by_value=None, clip_by_norm=None, clip_by_global_norm=None, img_summary=False, train_indices=None, return_loss=False)
This sets up a function that performs one training step with the delta loss, which tries to maximize the information of the summary statistics. Note it needs the maps need to be ordered in a specific way: * The shape of the maps is (n_points*n_same*(2*n_params+1), len(indices), n_channels) * If one splits the maps into (2*n_params+1) parts among the first axis one has the following scheme: * The first part was generated with the unperturbed parameters * The second part was generated with parameters where off_sets[0] was subtracted from the first param * The third part was generated with parameters where off_sets[0] was added from to first param * The fourth part was generated with parameters where off_sets[1] was subtracted from the second param * and so on The training step function that is set up will only work if the input has a shape: (n_points*n_same*(2*n_params+1), len(indices), n_channels) If multiple clippings are requested the order will be: * by value * by norm * by global norm :param batch_size: How many summaries (unperturbed only) are coming from the same parameter set :param off_sets: The off_sets used to perturb the original parameters and used for the Jacobian calculation :param n_points: number of different parameter sets :param n_channels: number of channels from the input :param n_output: Dimensionality of the summary statistic, defaults to predictions.get_shape()[-1] :param jac_weight: The weight of the Jacobian loss (loss that forces the Jacobian of the summaries to be close to unity (or identity matrix). :param force_params: Either None or a set of parameters with shape (n_points, 1, n_output) which is used to compute a square loss of the unperturbed summaries. It is useful to set this for example to zeros such that the network does not produces arbitrary high summary values :param force_weight: The weight of the square loss of force_params :param jac_cond_weight: If not None, this weight is used to add an additional loss using the matrix condition number of the jacobian :param use_log_det: Use the log of the determinants in the information inequality, should be True. If False the information inequality is not minimized in a proper manner and the training can become unstable. :param no_correlations: Do not consider correlations between the parameter, this means that one tries to find an optimal summary (single value) for each underlying model parameter, only possible if n_output == n_params :param tikhonov_regu: Use Tikhonov regularization of matrices e.g. to avoid vanishing determinants. This is the recommended regularization method as it allows the usage of some optimized routines. :param weights: An 1d array of length n_points, used as weights in means of the different points. :param eps: A small positive value used for regularization of things like logs etc. This should only be increased if tikhonov_regu is used and a error is raised. :param n_partial: To train only on a subset of parameters and not all underlying model parameter. Defaults to None which means the information inequality is minimized in a normal fashion. Note that due to the necessity of some algebraic manipulations n_partial == None and n_partial == n_params lead to slightly different behaviour. 
:param clip_by_value: Clip the gradients by given 1d array of values into the interval [value[0], value[1]], defaults to no clipping :param clip_by_norm: Clip the gradients by norm, defaults to no clipping :param clip_by_global_norm: Clip the gradients by global norm, defaults to no clipping :param img_summary: image summary of jacobian and covariance :param train_indices: A list of indices, if not None only [trainable_variables[i] for i in train_indices] will be trained :param return_loss: If true, he function that is set up returns the actual loss value, otherwise, it is a void
This sets up a function that performs one training step with the delta loss, which tries to maximize the information of the summary statistics. Note it needs the maps need to be ordered in a specific way: * The shape of the maps is (n_points*n_same*(2*n_params+1), len(indices), n_channels) * If one splits the maps into (2*n_params+1) parts among the first axis one has the following scheme: * The first part was generated with the unperturbed parameters * The second part was generated with parameters where off_sets[0] was subtracted from the first param * The third part was generated with parameters where off_sets[0] was added from to first param * The fourth part was generated with parameters where off_sets[1] was subtracted from the second param * and so on The training step function that is set up will only work if the input has a shape: (n_points*n_same*(2*n_params+1), len(indices), n_channels) If multiple clippings are requested the order will be: * by value * by norm * by global norm :param batch_size: How many summaries (unperturbed only) are coming from the same parameter set :param off_sets: The off_sets used to perturb the original parameters and used for the Jacobian calculation :param n_points: number of different parameter sets :param n_channels: number of channels from the input :param n_output: Dimensionality of the summary statistic, defaults to predictions.get_shape()[-1] :param jac_weight: The weight of the Jacobian loss (loss that forces the Jacobian of the summaries to be close to unity (or identity matrix). :param force_params: Either None or a set of parameters with shape (n_points, 1, n_output) which is used to compute a square loss of the unperturbed summaries. It is useful to set this for example to zeros such that the network does not produces arbitrary high summary values :param force_weight: The weight of the square loss of force_params :param jac_cond_weight: If not None, this weight is used to add an additional loss using the matrix condition number of the jacobian :param use_log_det: Use the log of the determinants in the information inequality, should be True. If False the information inequality is not minimized in a proper manner and the training can become unstable. :param no_correlations: Do not consider correlations between the parameter, this means that one tries to find an optimal summary (single value) for each underlying model parameter, only possible if n_output == n_params :param tikhonov_regu: Use Tikhonov regularization of matrices e.g. to avoid vanishing determinants. This is the recommended regularization method as it allows the usage of some optimized routines. :param weights: An 1d array of length n_points, used as weights in means of the different points. :param eps: A small positive value used for regularization of things like logs etc. This should only be increased if tikhonov_regu is used and a error is raised. :param n_partial: To train only on a subset of parameters and not all underlying model parameter. Defaults to None which means the information inequality is minimized in a normal fashion. Note that due to the necessity of some algebraic manipulations n_partial == None and n_partial == n_params lead to slightly different behaviour. 
:param clip_by_value: Clip the gradients by given 1d array of values into the interval [value[0], value[1]], defaults to no clipping :param clip_by_norm: Clip the gradients by norm, defaults to no clipping :param clip_by_global_norm: Clip the gradients by global norm, defaults to no clipping :param img_summary: image summary of jacobian and covariance :param train_indices: A list of indices, if not None only [trainable_variables[i] for i in train_indices] will be trained :param return_loss: If true, he function that is set up returns the actual loss value, otherwise, it is a void
def setup_delta_loss_step(self, batch_size, off_sets, n_points=1, n_channels=1, n_output=None, jac_weight=0.0,
                          force_params=None, force_weight=1.0, jac_cond_weight=None, use_log_det=True,
                          no_correlations=False, tikhonov_regu=False, weights=None, eps=1e-32, n_partial=None,
                          clip_by_value=None, clip_by_norm=None, clip_by_global_norm=None, img_summary=False,
                          train_indices=None, return_loss=False):
    """
    This sets up a function that performs one training step with the delta loss, which tries to maximize the
    information of the summary statistics. Note that the maps need to be ordered in a specific way:

    * The shape of the maps is (n_points*n_same*(2*n_params+1), len(indices), n_channels)
    * If one splits the maps into (2*n_params+1) parts among the first axis one has the following scheme:

        * The first part was generated with the unperturbed parameters
        * The second part was generated with parameters where off_sets[0] was subtracted from the first param
        * The third part was generated with parameters where off_sets[0] was added to the first param
        * The fourth part was generated with parameters where off_sets[1] was subtracted from the second param
        * and so on

    The training step function that is set up will only work if the input has a shape:
    (n_points*n_same*(2*n_params+1), len(indices), n_channels)

    If multiple clippings are requested the order will be:

    * by value
    * by norm
    * by global norm

    :param batch_size: How many summaries (unperturbed only) are coming from the same parameter set
    :param off_sets: The off_sets used to perturb the original parameters and used for the Jacobian calculation
    :param n_points: number of different parameter sets
    :param n_channels: number of channels from the input
    :param n_output: Dimensionality of the summary statistic, defaults to predictions.get_shape()[-1]
    :param jac_weight: The weight of the Jacobian loss (loss that forces the Jacobian of the summaries to be
                       close to unity (or identity matrix).
    :param force_params: Either None or a set of parameters with shape (n_points, 1, n_output) which is used to
                         compute a square loss of the unperturbed summaries. It is useful to set this for
                         example to zeros such that the network does not produce arbitrary high summary values
    :param force_weight: The weight of the square loss of force_params
    :param jac_cond_weight: If not None, this weight is used to add an additional loss using the matrix
                            condition number of the jacobian
    :param use_log_det: Use the log of the determinants in the information inequality, should be True. If False
                        the information inequality is not minimized in a proper manner and the training can
                        become unstable.
    :param no_correlations: Do not consider correlations between the parameter, this means that one tries to
                            find an optimal summary (single value) for each underlying model parameter, only
                            possible if n_output == n_params
    :param tikhonov_regu: Use Tikhonov regularization of matrices e.g. to avoid vanishing determinants. This is
                          the recommended regularization method as it allows the usage of some optimized
                          routines.
    :param weights: An 1d array of length n_points, used as weights in means of the different points.
    :param eps: A small positive value used for regularization of things like logs etc. This should only be
                increased if tikhonov_regu is used and an error is raised.
    :param n_partial: To train only on a subset of parameters and not all underlying model parameter. Defaults
                      to None which means the information inequality is minimized in a normal fashion. Note that
                      due to the necessity of some algebraic manipulations n_partial == None and
                      n_partial == n_params lead to slightly different behaviour.
    :param clip_by_value: Clip the gradients by given 1d array of values into the interval [value[0], value[1]],
                          defaults to no clipping
    :param clip_by_norm: Clip the gradients by norm, defaults to no clipping
    :param clip_by_global_norm: Clip the gradients by global norm, defaults to no clipping
    :param img_summary: image summary of jacobian and covariance
    :param train_indices: A list of indices, if not None only [trainable_variables[i] for i in train_indices]
                          will be trained
    :param return_loss: If true, the function that is set up returns the actual loss value, otherwise it
                        returns nothing
    """
    # check if we run in distributed fashion
    try:
        num_workers = hvd.size()
    except ValueError:
        num_workers = None

    # some definitions
    n_params = len(off_sets)

    # setup a loss function
    def loss_func(predictions):
        return losses.delta_loss(predictions=predictions, n_params=n_params, n_same=batch_size,
                                 off_sets=off_sets, n_output=n_output, jac_weight=jac_weight,
                                 force_params=force_params, force_weight=force_weight,
                                 jac_cond_weight=jac_cond_weight, use_log_det=use_log_det,
                                 no_correlations=no_correlations, tikhonov_regu=tikhonov_regu,
                                 summary_writer=self.summary_writer, training=True, weights=weights, eps=eps,
                                 n_partial=n_partial, num_workers=num_workers, img_summary=img_summary)

    # get the backend float and input shape
    current_float = losses._get_backend_floatx()
    in_shape = (n_points * batch_size * (2 * n_params + 1), len(self.network.indices_in), n_channels)

    # tf function with nice signature
    if return_loss:
        @tf.function(input_signature=[tf.TensorSpec(shape=in_shape, dtype=current_float)])
        def delta_train_step(input_batch):
            loss_val = self.base_train_step(input_tensor=input_batch, loss_function=loss_func, input_labels=None,
                                            clip_by_value=clip_by_value, clip_by_norm=clip_by_norm,
                                            clip_by_global_norm=clip_by_global_norm, training=True,
                                            num_workers=num_workers, train_indices=train_indices,
                                            return_loss=return_loss)
            return loss_val
    else:
        @tf.function(input_signature=[tf.TensorSpec(shape=in_shape, dtype=current_float)])
        def delta_train_step(input_batch):
            self.base_train_step(input_tensor=input_batch, loss_function=loss_func, input_labels=None,
                                 clip_by_value=clip_by_value, clip_by_norm=clip_by_norm,
                                 clip_by_global_norm=clip_by_global_norm, training=True,
                                 num_workers=num_workers, train_indices=train_indices, return_loss=return_loss)

    self.delta_train_step = delta_train_step

    if num_workers is not None:
        print("It is important to call the function <broadcast_variables> after the first gradient descent "
              "step, to ensure that everything is correctly initialized (also the optimizer)")
[ "def", "setup_delta_loss_step", "(", "self", ",", "batch_size", ",", "off_sets", ",", "n_points", "=", "1", ",", "n_channels", "=", "1", ",", "n_output", "=", "None", ",", "jac_weight", "=", "0.0", ",", "force_params", "=", "None", ",", "force_weight", "=", "1.0", ",", "jac_cond_weight", "=", "None", ",", "use_log_det", "=", "True", ",", "no_correlations", "=", "False", ",", "tikhonov_regu", "=", "False", ",", "weights", "=", "None", ",", "eps", "=", "1e-32", ",", "n_partial", "=", "None", ",", "clip_by_value", "=", "None", ",", "clip_by_norm", "=", "None", ",", "clip_by_global_norm", "=", "None", ",", "img_summary", "=", "False", ",", "train_indices", "=", "None", ",", "return_loss", "=", "False", ")", ":", "# check if we run in distributed fashion", "try", ":", "num_workers", "=", "hvd", ".", "size", "(", ")", "except", "ValueError", ":", "num_workers", "=", "None", "# some definitions", "n_params", "=", "len", "(", "off_sets", ")", "# setup a loss function", "def", "loss_func", "(", "predictions", ")", ":", "return", "losses", ".", "delta_loss", "(", "predictions", "=", "predictions", ",", "n_params", "=", "n_params", ",", "n_same", "=", "batch_size", ",", "off_sets", "=", "off_sets", ",", "n_output", "=", "n_output", ",", "jac_weight", "=", "jac_weight", ",", "force_params", "=", "force_params", ",", "force_weight", "=", "force_weight", ",", "jac_cond_weight", "=", "jac_cond_weight", ",", "use_log_det", "=", "use_log_det", ",", "no_correlations", "=", "no_correlations", ",", "tikhonov_regu", "=", "tikhonov_regu", ",", "summary_writer", "=", "self", ".", "summary_writer", ",", "training", "=", "True", ",", "weights", "=", "weights", ",", "eps", "=", "eps", ",", "n_partial", "=", "n_partial", ",", "num_workers", "=", "num_workers", ",", "img_summary", "=", "img_summary", ")", "# get the backend float and input shape", "current_float", "=", "losses", ".", "_get_backend_floatx", "(", ")", "in_shape", "=", "(", "n_points", "*", "batch_size", "*", "(", "2", "*", "n_params", "+", "1", ")", ",", "len", "(", "self", ".", "network", ".", "indices_in", ")", ",", "n_channels", ")", "# tf function with nice signature", "if", "return_loss", ":", "@", "tf", ".", "function", "(", "input_signature", "=", "[", "tf", ".", "TensorSpec", "(", "shape", "=", "in_shape", ",", "dtype", "=", "current_float", ")", "]", ")", "def", "delta_train_step", "(", "input_batch", ")", ":", "loss_val", "=", "self", ".", "base_train_step", "(", "input_tensor", "=", "input_batch", ",", "loss_function", "=", "loss_func", ",", "input_labels", "=", "None", ",", "clip_by_value", "=", "clip_by_value", ",", "clip_by_norm", "=", "clip_by_norm", ",", "clip_by_global_norm", "=", "clip_by_global_norm", ",", "training", "=", "True", ",", "num_workers", "=", "num_workers", ",", "train_indices", "=", "train_indices", ",", "return_loss", "=", "return_loss", ")", "return", "loss_val", "else", ":", "@", "tf", ".", "function", "(", "input_signature", "=", "[", "tf", ".", "TensorSpec", "(", "shape", "=", "in_shape", ",", "dtype", "=", "current_float", ")", "]", ")", "def", "delta_train_step", "(", "input_batch", ")", ":", "self", ".", "base_train_step", "(", "input_tensor", "=", "input_batch", ",", "loss_function", "=", "loss_func", ",", "input_labels", "=", "None", ",", "clip_by_value", "=", "clip_by_value", ",", "clip_by_norm", "=", "clip_by_norm", ",", "clip_by_global_norm", "=", "clip_by_global_norm", ",", "training", "=", "True", ",", "num_workers", "=", "num_workers", ",", "train_indices", "=", "train_indices", ",", "return_loss", 
"=", "return_loss", ")", "self", ".", "delta_train_step", "=", "delta_train_step", "if", "num_workers", "is", "not", "None", ":", "print", "(", "\"It it important to call the function <broadcast_variables> after the first gradient descent step, \"", "\"to ensure that everything is correctly initialized (also the optimizer)\"", ")" ]
[ 264, 4 ]
[ 367, 92 ]
python
en
['en', 'error', 'th']
False
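A minimal sketch of how the delta-loss step documented above would be wired up, assuming `model` is an already constructed HealpyModel and the simulated maps were generated in the block ordering the docstring describes; the number of parameters, the off-sets and the simulator itself are hypothetical.

import numpy as np

# hypothetical: 2 model parameters, perturbed by these off-sets
off_sets = np.array([0.01, 0.02])
n_params = len(off_sets)
batch_size = 8            # unperturbed maps per parameter set
n_points = 1              # one fiducial parameter set
n_channels = 1

# block ordering along axis 0 (each block holds n_points*batch_size maps):
# [fiducial, p0 - off_sets[0], p0 + off_sets[0], p1 - off_sets[1], p1 + off_sets[1]]
n_pix = len(model.network.indices_in)
maps = np.zeros((n_points * batch_size * (2 * n_params + 1), n_pix, n_channels),
                dtype=np.float32)   # fill with the simulated maps in that order

model.setup_delta_loss_step(batch_size=batch_size, off_sets=off_sets,
                            n_points=n_points, n_channels=n_channels,
                            tikhonov_regu=True, return_loss=True)
loss = model.delta_train_step(maps)   # one optimisation step
# under Horovod, broadcast right after the very first step:
# model.broadcast_variables()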
HealpyModel.broadcast_variables
(self)
Broadcasts the variables of the network and optimizer from the chief to all other workers
Broadcasts the variables of the network and optimizer from the chief to all other workers
def broadcast_variables(self): """ boradcasts the variables from the chief to all other workers from the network and optimizer """ hvd.broadcast_variables(self.network.weights, root_rank=0) hvd.broadcast_variables(self.optimizer.variables(), root_rank=0)
[ "def", "broadcast_variables", "(", "self", ")", ":", "hvd", ".", "broadcast_variables", "(", "self", ".", "network", ".", "weights", ",", "root_rank", "=", "0", ")", "hvd", ".", "broadcast_variables", "(", "self", ".", "optimizer", ".", "variables", "(", ")", ",", "root_rank", "=", "0", ")" ]
[ 369, 4 ]
[ 374, 72 ]
python
en
['en', 'error', 'th']
False
HealpyModel.setup_1st_order_estimator
(self, dset, fidu_param, off_sets, print_params=False, tf_dtype=tf.float32, tikohnov=0.0, layer=None, dset_is_sims=False)
Sets up a first order estimator from a given dataset that will be evaluated :param dset: The dataset that will be evaluated :param fidu_param: the fiducial parameter of the estimator :param off_sets: the offsets used for the perturbations :param print_params: print the calculated params :param tf_dtype: the tensorflow datatype to use :param tikohnov: Add Tikhonov regularization before inverting the jacobian :param layer: integer, propagate only up to this layer, can be -1 :param dset_is_sims: If True, dset will be treated as evaluations
Sets up a first order estimator from a given dataset that will be evaluated :param dset: The dataset that will be evaluated :param fidu_param: the fiducial parameter of the estimator :param off_sets: the offsets used for the perturbations :param print_params: print the calculated params :param tf_dtype: the tensorflow datatype to use :param tikohnov: Add Tikhonov regularization before inverting the jacobian :param layer: integer, propagate only up to this layer, can be -1 :param dset_is_sims: If True, dset will be treated as evaluations
def setup_1st_order_estimator(self, dset, fidu_param, off_sets, print_params=False, tf_dtype=tf.float32, tikohnov=0.0, layer=None, dset_is_sims=False): """ Sets up a first order estimator from a given dataset that will be evaluated :param dset: The dataset that will be evaluated :param fidu_param: the fiducial parameter of the estimator :param off_sets: the offsets used for the perturbations :param print_params: print the calculated params :param tf_dtype: the tensorflow datatype to use :param tikohnov: Add tikohnov regularization before inverting the jacobian :param layer: integer, propagate only up to this layer, can be -1 :param dset_is_sims: If Ture, dset will be treated as evaluations """ # set the layer self.estimator_layer = layer # dimension check fidu_param = np.atleast_2d(fidu_param) n_param = fidu_param.shape[-1] n_splits = 2 * n_param + 1 if dset_is_sims: predictions = dset else: # get the predictions predictions = [] for batch in dset: predictions.append(np.split(self.__call__(batch, training=False, layer=self.estimator_layer).numpy(), indices_or_sections=n_splits, axis=0)) # concat predictions = np.concatenate(predictions, axis=1) self.estimator = estimator_1st_order(sims=predictions, fiducial_point=fidu_param, offsets=off_sets, print_params=print_params, tf_dtype=tf_dtype, tikohnov=tikohnov)
[ "def", "setup_1st_order_estimator", "(", "self", ",", "dset", ",", "fidu_param", ",", "off_sets", ",", "print_params", "=", "False", ",", "tf_dtype", "=", "tf", ".", "float32", ",", "tikohnov", "=", "0.0", ",", "layer", "=", "None", ",", "dset_is_sims", "=", "False", ")", ":", "# set the layer", "self", ".", "estimator_layer", "=", "layer", "# dimension check", "fidu_param", "=", "np", ".", "atleast_2d", "(", "fidu_param", ")", "n_param", "=", "fidu_param", ".", "shape", "[", "-", "1", "]", "n_splits", "=", "2", "*", "n_param", "+", "1", "if", "dset_is_sims", ":", "predictions", "=", "dset", "else", ":", "# get the predictions", "predictions", "=", "[", "]", "for", "batch", "in", "dset", ":", "predictions", ".", "append", "(", "np", ".", "split", "(", "self", ".", "__call__", "(", "batch", ",", "training", "=", "False", ",", "layer", "=", "self", ".", "estimator_layer", ")", ".", "numpy", "(", ")", ",", "indices_or_sections", "=", "n_splits", ",", "axis", "=", "0", ")", ")", "# concat", "predictions", "=", "np", ".", "concatenate", "(", "predictions", ",", "axis", "=", "1", ")", "self", ".", "estimator", "=", "estimator_1st_order", "(", "sims", "=", "predictions", ",", "fiducial_point", "=", "fidu_param", ",", "offsets", "=", "off_sets", ",", "print_params", "=", "print_params", ",", "tf_dtype", "=", "tf_dtype", ",", "tikohnov", "=", "tikohnov", ")" ]
[ 376, 4 ]
[ 409, 109 ]
python
en
['en', 'error', 'th']
False
HealpyModel.estimate
(self, input_tensor)
Calculates the first order estimates of the underlying model parameter given a network input :param input_tensor: The input to feed in the network :return: The parameter estimates
Calculates the first order estimates of the underlying model parameter given a network input :param input_tensor: The input to feed in the network :return: The parameter estimates
def estimate(self, input_tensor): """ Calculates the first order estimates of the underlying model parameter given a network input :param input_tensor: The input to feed in the network :return: The parameter estimates """ if self.estimator is None: raise ValueError("First order estimator not set! Call <setup_1st_order_estimator> first!") preds = self.__call__(input_tensor, training=False, layer=self.estimator_layer) return self.estimator(preds)
[ "def", "estimate", "(", "self", ",", "input_tensor", ")", ":", "if", "self", ".", "estimator", "is", "None", ":", "raise", "ValueError", "(", "\"First order estimator not set! Call <setup_1st_order_estimator> first!\"", ")", "preds", "=", "self", ".", "__call__", "(", "input_tensor", ",", "training", "=", "False", ",", "layer", "=", "self", ".", "estimator_layer", ")", "return", "self", ".", "estimator", "(", "preds", ")" ]
[ 411, 4 ]
[ 422, 36 ]
python
en
['en', 'error', 'th']
False
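Continuing the sketch above, the first-order estimator uses the same (2*n_params + 1) block ordering; `model`, `dset` and `new_maps` are assumed to exist, and note that `tikohnov` is the parameter name as spelled in this API.

import numpy as np

fidu_param = np.array([0.3, 0.8])   # hypothetical fiducial parameters
off_sets = np.array([0.01, 0.02])   # the off-sets used to perturb the simulations

# dset yields batches shaped (batch*(2*n_params + 1), n_pix, n_channels)
model.setup_1st_order_estimator(dset, fidu_param=fidu_param, off_sets=off_sets,
                                print_params=True, tikohnov=1e-3)

estimates = model.estimate(new_maps)   # first-order parameter estimates for new maps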
HealpyModel.__call__
(self, input_tensor, training=False, numpy=False, layer=None, *args, **kwargs)
Calls the underlying network :param input_tensor: the tensor (or array) to call on :param training: whether we are training or evaluating (e.g. necessary for batch norm) :param args: additional arguments passed to the network :param kwargs: additional keyword arguments passed to the network :param numpy: return a numpy array instead of a tensor :param layer: integer, propagate only up to this layer, can be -1 :return: either a tensor or an array depending on param numpy
Calls the underlying network :param input_tensor: the tensor (or array) to call on :param training: whether we are training or evaluating (e.g. necessary for batch norm) :param args: additional arguments passed to the network :param kwargs: additional keyword arguments passed to the network :param numpy: return a numpy array instead of a tensor :param layer: integer, propagate only up to this layer, can be -1 :return: either a tensor or an array depending on param numpy
def __call__(self, input_tensor, training=False, numpy=False, layer=None, *args, **kwargs): """ Calls the underlying network :param input_tensor: the tensor (or array) to call on :param training: whether we are training or evaluating (e.g. necessary gor batch norm) :param args: additional arguments passed to the network :param kwargs: additional keyword arguments passed to the network :param numpy: return a numpy array instead of a tensor :param layer: integer, propagate only up to this layer, can be -1 :return: either a tensor or an array depending on param numpy """ if layer is None: preds = self.network(input_tensor, training=training, *args, **kwargs) else: preds = input_tensor for layer in self.network.layers[:layer]: preds = layer(preds) if numpy: return preds.numpy() else: return preds
[ "def", "__call__", "(", "self", ",", "input_tensor", ",", "training", "=", "False", ",", "numpy", "=", "False", ",", "layer", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "layer", "is", "None", ":", "preds", "=", "self", ".", "network", "(", "input_tensor", ",", "training", "=", "training", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "preds", "=", "input_tensor", "for", "layer", "in", "self", ".", "network", ".", "layers", "[", ":", "layer", "]", ":", "preds", "=", "layer", "(", "preds", ")", "if", "numpy", ":", "return", "preds", ".", "numpy", "(", ")", "else", ":", "return", "preds" ]
[ 424, 4 ]
[ 445, 24 ]
python
en
['en', 'error', 'th']
False
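A small illustration of the layer-truncated forward pass described in `__call__` (again assuming a `model` instance and an input batch `maps`):

full_preds = model(maps)                      # run through the whole network
features = model(maps, layer=-1, numpy=True)  # stop before the last layer, return a numpy array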
UserNSVD1.__init__
(self, train_file=None, test_file=None, metadata_file=None, output_file=None, epochs=30, learn_rate=0.01, delta=0.015, factors=10, init_mean=0, init_stdev=0.1, stop_criteria=0.001, batch=False, n2=10, learn_rate2=0.01, delta2=0.015, sep='\t', output_sep='\t', metadata_sep='\t', metadata_as_binary=False, random_seed=None)
UserNSVD1 for rating prediction Usage:: >> UserNSVD1(train, test, metadata_file='user_metadata.dat').compute() >> UserNSVD1(train, test, metadata_file='user_metadata.dat', batch=True).compute() :param train_file: File which contains the train set. This file needs to have at least 3 columns (user item feedback_value). :type train_file: str :param test_file: File which contains the test set. This file needs to have at least 3 columns (user item feedback_value). :type test_file: str, default None :param metadata_file: File which contains the metadata set. This file needs to have at least 2 columns (user metadata). :type metadata_file: str :param output_file: File with dir to write the final predictions :type output_file: str, default None :param epochs: Number of epochs over the training data :type epochs: int, default 30 :param learn_rate: Learning rate (alpha) :type learn_rate: float, default 0.01 :param delta: Regularization value :type delta: float, default 0.015 :param factors: Number of latent factors per user/item :type factors: int, default 10 :param init_mean: Mean of the normal distribution used to initialize the latent factors :type init_mean: float, default 0 :param init_stdev: Standard deviation of the normal distribution used to initialize the latent factors :type init_stdev: float, default 0.1 :param stop_criteria: Difference between errors for stopping criteria :type stop_criteria: float, default 0.001 :param batch: If True, use batch model to train the model :type batch: bool, default False :param n2: Number of iterations in batch step :type n2: int, default 10 :param learn_rate2: Learning rate in batch step :type learn_rate2: float, default 0.01 :param delta2: Regularization value in batch step :type delta2: float, default 0.015 :param sep: Delimiter for input files :type sep: str, default '\t' :param output_sep: Delimiter for output file :type output_sep: str, default '\t' :param metadata_sep: Delimiter for similarity or metadata file :type metadata_sep: str, default '\t' :param metadata_as_binary: If True, the explicit value will be transformed to binary :type metadata_as_binary: bool, default False :param random_seed: Number of seed. Lock random numbers for reproducibility of experiments. :type random_seed: int, default None
UserNSVD1 for rating prediction
def __init__(self, train_file=None, test_file=None, metadata_file=None, output_file=None, epochs=30, learn_rate=0.01, delta=0.015, factors=10, init_mean=0, init_stdev=0.1, stop_criteria=0.001, batch=False, n2=10, learn_rate2=0.01, delta2=0.015, sep='\t', output_sep='\t', metadata_sep='\t', metadata_as_binary=False, random_seed=None): """ UserNSVD1 for rating prediction Usage:: >> UserNSVD1(train, test, metadata_file='user_metadata.dat').compute() >> UserNSVD1(train, test, metadata_file='user_metadata.dat', batch=True).compute() :param train_file: File which contains the train set. This file needs to have at least 3 columns (user item feedback_value). :type train_file: str :param test_file: File which contains the test set. This file needs to have at least 3 columns (user item feedback_value). :type test_file: str, default None :param metadata_file: File which contains the metadata set. This file needs to have at least 2 columns (user metadata). :type metadata_file: str :param output_file: File with dir to write the final predictions :type output_file: str, default None :param epochs: Number of epochs over the training data :type epochs: int, default 10 :param learn_rate: Learning rate (alpha) :type learn_rate: float, default 0.05 :param delta: Regularization value :type delta: float, default 0.015 :param factors: Number of latent factors per user/item :type factors: int, default 10 :param init_mean: Mean of the normal distribution used to initialize the latent factors :type init_mean: float, default 0 :param init_stdev: Standard deviation of the normal distribution used to initialize the latent factors :type init_stdev: float, default 0.1 :param stop_criteria: Difference between errors for stopping criteria :type stop_criteria: float, default 0.001 :param batch: Tf True, use batch model to train the model :type batch: bool, default False :param n2: Number of interactions in batch step :type n2: int, default 10 :param learn_rate2: Learning rate in batch step :type learn_rate2: float, default 0.01 :param delta2: Regularization value in Batch step :type delta2: float, default 0.015 :param sep: Delimiter for input files :type sep: str, default '\t' :param output_sep: Delimiter for output file :type output_sep: str, default '\t' :param metadata_sep: Delimiter for similarity or metadata file :type metadata_sep: str, default '\t' :param metadata_as_binary: f True, the explicit value will be transform to binary :type metadata_as_binary: bool, default False :param random_seed: Number of seed. Lock random numbers for reproducibility of experiments. :type random_seed: int, default None """ super(UserNSVD1, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file, factors=factors, init_mean=init_mean, init_stdev=init_stdev, sep=sep, output_sep=output_sep, random_seed=random_seed) self.recommender_name = 'UserNSVD1' self.metadata_file = metadata_file self.batch = batch self.epochs = epochs self.learn_rate = learn_rate self.delta = delta self.stop_criteria = stop_criteria self.n2 = n2 self.learn_rate2 = learn_rate2 self.delta2 = delta2 self.metadata_sep = metadata_sep self.metadata_as_binary = metadata_as_binary # internal vars self.x = None self.non_zero_x = None self.d = None
[ "def", "__init__", "(", "self", ",", "train_file", "=", "None", ",", "test_file", "=", "None", ",", "metadata_file", "=", "None", ",", "output_file", "=", "None", ",", "epochs", "=", "30", ",", "learn_rate", "=", "0.01", ",", "delta", "=", "0.015", ",", "factors", "=", "10", ",", "init_mean", "=", "0", ",", "init_stdev", "=", "0.1", ",", "stop_criteria", "=", "0.001", ",", "batch", "=", "False", ",", "n2", "=", "10", ",", "learn_rate2", "=", "0.01", ",", "delta2", "=", "0.015", ",", "sep", "=", "'\\t'", ",", "output_sep", "=", "'\\t'", ",", "metadata_sep", "=", "'\\t'", ",", "metadata_as_binary", "=", "False", ",", "random_seed", "=", "None", ")", ":", "super", "(", "UserNSVD1", ",", "self", ")", ".", "__init__", "(", "train_file", "=", "train_file", ",", "test_file", "=", "test_file", ",", "output_file", "=", "output_file", ",", "factors", "=", "factors", ",", "init_mean", "=", "init_mean", ",", "init_stdev", "=", "init_stdev", ",", "sep", "=", "sep", ",", "output_sep", "=", "output_sep", ",", "random_seed", "=", "random_seed", ")", "self", ".", "recommender_name", "=", "'UserNSVD1'", "self", ".", "metadata_file", "=", "metadata_file", "self", ".", "batch", "=", "batch", "self", ".", "epochs", "=", "epochs", "self", ".", "learn_rate", "=", "learn_rate", "self", ".", "delta", "=", "delta", "self", ".", "stop_criteria", "=", "stop_criteria", "self", ".", "n2", "=", "n2", "self", ".", "learn_rate2", "=", "learn_rate2", "self", ".", "delta2", "=", "delta2", "self", ".", "metadata_sep", "=", "metadata_sep", "self", ".", "metadata_as_binary", "=", "metadata_as_binary", "# internal vars", "self", ".", "x", "=", "None", "self", ".", "non_zero_x", "=", "None", "self", ".", "d", "=", "None" ]
[ 27, 4 ]
[ 125, 21 ]
python
en
['en', 'error', 'th']
False
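The usage lines quoted in the UserNSVD1 docstring, expanded into a slightly fuller sketch; the file paths are placeholders and UserNSVD1 is assumed to be imported from CaseRecommender.

# plain (online) training
UserNSVD1('train.dat', 'test.dat', metadata_file='user_metadata.dat',
          output_file='predictions.dat', epochs=30, factors=10).compute()

# batch variant of the metadata-factor update
UserNSVD1('train.dat', 'test.dat', metadata_file='user_metadata.dat',
          batch=True, n2=10, learn_rate2=0.01).compute()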
UserNSVD1.init_model
(self)
Method to treat and initialize the model. Extends init_model from BaseNSVD1
Method to treat and initialize the model. Extends init_model from BaseNSVD1
def init_model(self): """ Method to treat and initialize the model. Extends init_model from BaseNSVD1 """ super(UserNSVD1, self).init_model() self.non_zero_x = [] self.d = [] self.metadata = ReadFile(self.metadata_file, sep=self.metadata_sep, as_binary=self.metadata_as_binary ).read_metadata_or_similarity() # create metadata matrix (user x metadata) self.x = np.zeros((self.number_users, len(self.metadata['col_2']))) meta_to_meta_id = {} for m, data in enumerate(self.metadata['col_2']): meta_to_meta_id[data] = m for user_m in self.metadata['col_1']: for m1 in self.metadata['dict'][user_m]: self.x[self.user_to_user_id[user_m], meta_to_meta_id[m1]] = self.metadata['dict'][user_m][m1] # create header info for metadata sparsity = (1 - (self.metadata['number_interactions'] / (len(self.metadata['col_1']) * len(self.metadata['col_2'])))) * 100 self.extra_info_header = ">> metadata:: %d users and %d metadata (%d interactions) | sparsity:: %.2f%%" % \ (len(self.metadata['col_1']), len(self.metadata['col_2']), self.metadata['number_interactions'], sparsity) self.number_metadata = len(self.metadata['col_2']) for u in range(self.number_users): self.non_zero_x.append(list(np.where(self.x[u] != 0)[0])) with np.errstate(divide='ignore'): self.d.append(1 / np.dot(self.x[u].T, self.x[u])) # Create Factors self.create_factors()
[ "def", "init_model", "(", "self", ")", ":", "super", "(", "UserNSVD1", ",", "self", ")", ".", "init_model", "(", ")", "self", ".", "non_zero_x", "=", "[", "]", "self", ".", "d", "=", "[", "]", "self", ".", "metadata", "=", "ReadFile", "(", "self", ".", "metadata_file", ",", "sep", "=", "self", ".", "metadata_sep", ",", "as_binary", "=", "self", ".", "metadata_as_binary", ")", ".", "read_metadata_or_similarity", "(", ")", "# create metadata matrix (user x metadata)", "self", ".", "x", "=", "np", ".", "zeros", "(", "(", "self", ".", "number_users", ",", "len", "(", "self", ".", "metadata", "[", "'col_2'", "]", ")", ")", ")", "meta_to_meta_id", "=", "{", "}", "for", "m", ",", "data", "in", "enumerate", "(", "self", ".", "metadata", "[", "'col_2'", "]", ")", ":", "meta_to_meta_id", "[", "data", "]", "=", "m", "for", "user_m", "in", "self", ".", "metadata", "[", "'col_1'", "]", ":", "for", "m1", "in", "self", ".", "metadata", "[", "'dict'", "]", "[", "user_m", "]", ":", "self", ".", "x", "[", "self", ".", "user_to_user_id", "[", "user_m", "]", ",", "meta_to_meta_id", "[", "m1", "]", "]", "=", "self", ".", "metadata", "[", "'dict'", "]", "[", "user_m", "]", "[", "m1", "]", "# create header info for metadata", "sparsity", "=", "(", "1", "-", "(", "self", ".", "metadata", "[", "'number_interactions'", "]", "/", "(", "len", "(", "self", ".", "metadata", "[", "'col_1'", "]", ")", "*", "len", "(", "self", ".", "metadata", "[", "'col_2'", "]", ")", ")", ")", ")", "*", "100", "self", ".", "extra_info_header", "=", "\">> metadata:: %d users and %d metadata (%d interactions) | sparsity:: %.2f%%\"", "%", "(", "len", "(", "self", ".", "metadata", "[", "'col_1'", "]", ")", ",", "len", "(", "self", ".", "metadata", "[", "'col_2'", "]", ")", ",", "self", ".", "metadata", "[", "'number_interactions'", "]", ",", "sparsity", ")", "self", ".", "number_metadata", "=", "len", "(", "self", ".", "metadata", "[", "'col_2'", "]", ")", "for", "u", "in", "range", "(", "self", ".", "number_users", ")", ":", "self", ".", "non_zero_x", ".", "append", "(", "list", "(", "np", ".", "where", "(", "self", ".", "x", "[", "u", "]", "!=", "0", ")", "[", "0", "]", ")", ")", "with", "np", ".", "errstate", "(", "divide", "=", "'ignore'", ")", ":", "self", ".", "d", ".", "append", "(", "1", "/", "np", ".", "dot", "(", "self", ".", "x", "[", "u", "]", ".", "T", ",", "self", ".", "x", "[", "u", "]", ")", ")", "# Create Factors", "self", ".", "create_factors", "(", ")" ]
[ 127, 4 ]
[ 168, 29 ]
python
en
['en', 'error', 'th']
False
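The core of `init_model` is a dense user x metadata matrix plus a per-user normalisation term; a toy, self-contained numpy version of just that piece:

import numpy as np

# 3 users, 4 binary metadata attributes (toy data)
x = np.array([[1, 0, 1, 0],
              [0, 1, 0, 0],
              [1, 1, 0, 1]], dtype=float)

non_zero_x = [list(np.where(row != 0)[0]) for row in x]  # metadata indices per user
with np.errstate(divide='ignore'):
    d = [1.0 / np.dot(row, row) for row in x]            # 1 / ||x_u||^2 (inf for empty rows)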
UserNSVD1.fit
(self)
This method performs iterations of stochastic gradient ascent over the training data.
This method performs iterations of stochastic gradient ascent over the training data.
def fit(self): """ This method performs iterations of stochastic gradient ascent over the training data. """ for k in range(self.epochs): rmse = 0 count_error = 0 if self.batch: self.p = np.dot(self.x, self.w) for u, user in enumerate(self.users): c, e = self.update_factors(user, u) rmse += e count_error += c for _ in range(self.n2): for u, user in enumerate(self.users): e = self.p[u] - (np.dot(self.x[u], self.w)) for l in self.non_zero_x[u]: self.w[l] += self.learn_rate2 * (self.d[u] * np.dot(self.x[u][l], e.T) - (self.w[l] * self.delta2)) self.p = np.dot(self.x, self.w) else: for u, user in enumerate(self.users): self.p[u] = np.dot(self.x[u], self.w) a = np.array(self.p[u]) c, e = self.update_factors(user, u) rmse += e count_error += c for l in self.non_zero_x[u]: self.w[l] += self.d[u] * self.x[u][l] * (self.p[u] - a) rmse = np.sqrt(rmse / float(count_error)) if (np.fabs(rmse - self.last_rmse)) <= self.stop_criteria: break else: self.last_rmse = rmse
[ "def", "fit", "(", "self", ")", ":", "for", "k", "in", "range", "(", "self", ".", "epochs", ")", ":", "rmse", "=", "0", "count_error", "=", "0", "if", "self", ".", "batch", ":", "self", ".", "p", "=", "np", ".", "dot", "(", "self", ".", "x", ",", "self", ".", "w", ")", "for", "u", ",", "user", "in", "enumerate", "(", "self", ".", "users", ")", ":", "c", ",", "e", "=", "self", ".", "update_factors", "(", "user", ",", "u", ")", "rmse", "+=", "e", "count_error", "+=", "c", "for", "_", "in", "range", "(", "self", ".", "n2", ")", ":", "for", "u", ",", "user", "in", "enumerate", "(", "self", ".", "users", ")", ":", "e", "=", "self", ".", "p", "[", "u", "]", "-", "(", "np", ".", "dot", "(", "self", ".", "x", "[", "u", "]", ",", "self", ".", "w", ")", ")", "for", "l", "in", "self", ".", "non_zero_x", "[", "u", "]", ":", "self", ".", "w", "[", "l", "]", "+=", "self", ".", "learn_rate2", "*", "(", "self", ".", "d", "[", "u", "]", "*", "np", ".", "dot", "(", "self", ".", "x", "[", "u", "]", "[", "l", "]", ",", "e", ".", "T", ")", "-", "(", "self", ".", "w", "[", "l", "]", "*", "self", ".", "delta2", ")", ")", "self", ".", "p", "=", "np", ".", "dot", "(", "self", ".", "x", ",", "self", ".", "w", ")", "else", ":", "for", "u", ",", "user", "in", "enumerate", "(", "self", ".", "users", ")", ":", "self", ".", "p", "[", "u", "]", "=", "np", ".", "dot", "(", "self", ".", "x", "[", "u", "]", ",", "self", ".", "w", ")", "a", "=", "np", ".", "array", "(", "self", ".", "p", "[", "u", "]", ")", "c", ",", "e", "=", "self", ".", "update_factors", "(", "user", ",", "u", ")", "rmse", "+=", "e", "count_error", "+=", "c", "for", "l", "in", "self", ".", "non_zero_x", "[", "u", "]", ":", "self", ".", "w", "[", "l", "]", "+=", "self", ".", "d", "[", "u", "]", "*", "self", ".", "x", "[", "u", "]", "[", "l", "]", "*", "(", "self", ".", "p", "[", "u", "]", "-", "a", ")", "rmse", "=", "np", ".", "sqrt", "(", "rmse", "/", "float", "(", "count_error", ")", ")", "if", "(", "np", ".", "fabs", "(", "rmse", "-", "self", ".", "last_rmse", ")", ")", "<=", "self", ".", "stop_criteria", ":", "break", "else", ":", "self", ".", "last_rmse", "=", "rmse" ]
[ 170, 4 ]
[ 215, 37 ]
python
en
['en', 'error', 'th']
False
UserNSVD1.update_factors
(self, user, u)
Update latent factors according to the stochastic gradient descent update rule :param user: User :type user: int :param u: User ID from self.users :type u: int :return: error and count
Update latent factors according to the stochastic gradient descent update rule
def update_factors(self, user, u): """ Update latent factors according to the stochastic gradient descent update rule :param user: User :type user: int :param u: User ID from self.users :type u: int :return: error and count """ c, e = 0, 0 for item in self.train_set['items_seen_by_user'].get(user, []): i = self.item_to_item_id[item] rui = self._predict(u, i) error = self.train_set['feedback'][user][item] - rui b = np.array(self.q[i]) # update factors self.p[u] += self.learn_rate * (error * b - self.delta * self.p[u]) self.q[i] += self.learn_rate * (error * self.p[u] - self.delta * self.q[i]) self.b[u] += self.learn_rate * (error - self.delta * self.b[u]) self.c[i] += self.learn_rate * (error - self.delta * self.c[i]) c += 1 e += error ** 2 return c, e
[ "def", "update_factors", "(", "self", ",", "user", ",", "u", ")", ":", "c", ",", "e", "=", "0", ",", "0", "for", "item", "in", "self", ".", "train_set", "[", "'items_seen_by_user'", "]", ".", "get", "(", "user", ",", "[", "]", ")", ":", "i", "=", "self", ".", "item_to_item_id", "[", "item", "]", "rui", "=", "self", ".", "_predict", "(", "u", ",", "i", ")", "error", "=", "self", ".", "train_set", "[", "'feedback'", "]", "[", "user", "]", "[", "item", "]", "-", "rui", "b", "=", "np", ".", "array", "(", "self", ".", "q", "[", "i", "]", ")", "# update factors", "self", ".", "p", "[", "u", "]", "+=", "self", ".", "learn_rate", "*", "(", "error", "*", "b", "-", "self", ".", "delta", "*", "self", ".", "p", "[", "u", "]", ")", "self", ".", "q", "[", "i", "]", "+=", "self", ".", "learn_rate", "*", "(", "error", "*", "self", ".", "p", "[", "u", "]", "-", "self", ".", "delta", "*", "self", ".", "q", "[", "i", "]", ")", "self", ".", "b", "[", "u", "]", "+=", "self", ".", "learn_rate", "*", "(", "error", "-", "self", ".", "delta", "*", "self", ".", "b", "[", "u", "]", ")", "self", ".", "c", "[", "i", "]", "+=", "self", ".", "learn_rate", "*", "(", "error", "-", "self", ".", "delta", "*", "self", ".", "c", "[", "i", "]", ")", "c", "+=", "1", "e", "+=", "error", "**", "2", "return", "c", ",", "e" ]
[ 217, 4 ]
[ 246, 19 ]
python
en
['en', 'error', 'th']
False
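Written out as a standalone toy snippet, the `update_factors` rule is SGD on the squared rating error with L2 regularisation; the exact prediction formula (`_predict`) is not shown in this record, so the bias form below is an assumption.

import numpy as np

learn_rate, delta = 0.01, 0.015
rng = np.random.default_rng(0)
p_u = rng.random(10)          # user factors
q_i = rng.random(10)          # item factors
b_u = 0.0                     # user bias
c_i = 0.0                     # item bias

r_ui = 4.0                    # observed rating
r_hat = b_u + c_i + p_u @ q_i # assumed prediction form
error = r_ui - r_hat

q_old = q_i.copy()                                 # the source copies q_i before updating p_u
p_u += learn_rate * (error * q_old - delta * p_u)
q_i += learn_rate * (error * p_u - delta * q_i)    # uses the already-updated p_u, as in the source
b_u += learn_rate * (error - delta * b_u)
c_i += learn_rate * (error - delta * c_i)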
UserNSVD1.compute
(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t')
Extends compute method from BaseRatingPrediction. Method to run recommender algorithm :param verbose: Print recommender and database information :type verbose: bool, default True :param metrics: List of evaluation measures :type metrics: list, default None :param verbose_evaluation: Print the evaluation results :type verbose_evaluation: bool, default True :param as_table: Print the evaluation results as a table :type as_table: bool, default False :param table_sep: Delimiter for print results (only works with verbose=True and as_table=True) :type table_sep: str, default '\t'
Extends compute method from BaseRatingPrediction. Method to run recommender algorithm
def compute(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t'): """ Extends compute method from BaseRatingPrediction. Method to run recommender algorithm :param verbose: Print recommender and database information :type verbose: bool, default True :param metrics: List of evaluation measures :type metrics: list, default None :param verbose_evaluation: Print the evaluation results :type verbose_evaluation: bool, default True :param as_table: Print the evaluation results as table :type as_table: bool, default False :param table_sep: Delimiter for print results (only work with verbose=True and as_table=True) :type table_sep: str, default '\t' """ super(UserNSVD1, self).compute(verbose=verbose) if verbose: self.init_model() if self.extra_info_header is not None: print(self.extra_info_header) print("training_time:: %4f sec" % timed(self.fit)) print("prediction_time:: %4f sec" % timed(self.predict)) print('\n') else: # Execute all in silence without prints self.init_model() self.fit() self.predict() self.write_predictions() if self.test_file is not None: self.evaluate(metrics, verbose_evaluation, as_table=as_table, table_sep=table_sep)
[ "def", "compute", "(", "self", ",", "verbose", "=", "True", ",", "metrics", "=", "None", ",", "verbose_evaluation", "=", "True", ",", "as_table", "=", "False", ",", "table_sep", "=", "'\\t'", ")", ":", "super", "(", "UserNSVD1", ",", "self", ")", ".", "compute", "(", "verbose", "=", "verbose", ")", "if", "verbose", ":", "self", ".", "init_model", "(", ")", "if", "self", ".", "extra_info_header", "is", "not", "None", ":", "print", "(", "self", ".", "extra_info_header", ")", "print", "(", "\"training_time:: %4f sec\"", "%", "timed", "(", "self", ".", "fit", ")", ")", "print", "(", "\"prediction_time:: %4f sec\"", "%", "timed", "(", "self", ".", "predict", ")", ")", "print", "(", "'\\n'", ")", "else", ":", "# Execute all in silence without prints", "self", ".", "init_model", "(", ")", "self", ".", "fit", "(", ")", "self", ".", "predict", "(", ")", "self", ".", "write_predictions", "(", ")", "if", "self", ".", "test_file", "is", "not", "None", ":", "self", ".", "evaluate", "(", "metrics", ",", "verbose_evaluation", ",", "as_table", "=", "as_table", ",", "table_sep", "=", "table_sep", ")" ]
[ 248, 4 ]
[ 288, 94 ]
python
en
['en', 'error', 'th']
False
newer_pairwise_group
(sources_groups, targets)
Walk both arguments in parallel, testing if each source group is newer than its corresponding target. Returns a pair of lists (sources_groups, targets) where sources is newer than target, according to the semantics of 'newer_group()'.
Walk both arguments in parallel, testing if each source group is newer than its corresponding target. Returns a pair of lists (sources_groups, targets) where sources is newer than target, according to the semantics of 'newer_group()'.
def newer_pairwise_group(sources_groups, targets): """Walk both arguments in parallel, testing if each source group is newer than its corresponding target. Returns a pair of lists (sources_groups, targets) where sources is newer than target, according to the semantics of 'newer_group()'. """ if len(sources_groups) != len(targets): raise ValueError("'sources_group' and 'targets' must be the same length") # build a pair of lists (sources_groups, targets) where source is newer n_sources = [] n_targets = [] for i in range(len(sources_groups)): if newer_group(sources_groups[i], targets[i]): n_sources.append(sources_groups[i]) n_targets.append(targets[i]) return n_sources, n_targets
[ "def", "newer_pairwise_group", "(", "sources_groups", ",", "targets", ")", ":", "if", "len", "(", "sources_groups", ")", "!=", "len", "(", "targets", ")", ":", "raise", "ValueError", "(", "\"'sources_group' and 'targets' must be the same length\"", ")", "# build a pair of lists (sources_groups, targets) where source is newer", "n_sources", "=", "[", "]", "n_targets", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "sources_groups", ")", ")", ":", "if", "newer_group", "(", "sources_groups", "[", "i", "]", ",", "targets", "[", "i", "]", ")", ":", "n_sources", ".", "append", "(", "sources_groups", "[", "i", "]", ")", "n_targets", ".", "append", "(", "targets", "[", "i", "]", ")", "return", "n_sources", ",", "n_targets" ]
[ 5, 0 ]
[ 22, 31 ]
python
en
['en', 'en', 'en']
True
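A quick usage sketch of `newer_pairwise_group` (file names are hypothetical; `newer_group` compares modification times on disk, so the outcome depends on which files actually exist):

sources_groups = [['a.c', 'a.h'], ['b.c', 'b.h']]
targets = ['a.o', 'b.o']

# keep only the (group, target) pairs whose sources are newer than the target
stale_sources, stale_targets = newer_pairwise_group(sources_groups, targets)
for group, target in zip(stale_sources, stale_targets):
    print('rebuild', target, 'from', group)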
_parse_arguments
(argv)
Parses command-line arguments.
Parses command-line arguments.
def _parse_arguments(argv): """Parses command-line arguments.""" parser = argparse.ArgumentParser() parser.add_argument( '--epochs', help='The number of epochs to train', type=int, default=5) parser.add_argument( '--steps_per_epoch', help='The number of steps per epoch to train', type=int, default=500) parser.add_argument( '--train_path', help='The path to the training data', type=str, default="gs://cloud-ml-data/img/flower_photos/train_set.csv") parser.add_argument( '--eval_path', help='The path to the evaluation data', type=str, default="gs://cloud-ml-data/img/flower_photos/eval_set.csv") parser.add_argument( '--tpu_address', help='The path to the evaluation data', type=str, required=True) parser.add_argument( '--hub_path', help='The path to TF Hub module to use in GCS', type=str, required=True) parser.add_argument( '--job-dir', help='Directory where to save the given model', type=str, required=True) return parser.parse_known_args(argv)
[ "def", "_parse_arguments", "(", "argv", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "'--epochs'", ",", "help", "=", "'The number of epochs to train'", ",", "type", "=", "int", ",", "default", "=", "5", ")", "parser", ".", "add_argument", "(", "'--steps_per_epoch'", ",", "help", "=", "'The number of steps per epoch to train'", ",", "type", "=", "int", ",", "default", "=", "500", ")", "parser", ".", "add_argument", "(", "'--train_path'", ",", "help", "=", "'The path to the training data'", ",", "type", "=", "str", ",", "default", "=", "\"gs://cloud-ml-data/img/flower_photos/train_set.csv\"", ")", "parser", ".", "add_argument", "(", "'--eval_path'", ",", "help", "=", "'The path to the evaluation data'", ",", "type", "=", "str", ",", "default", "=", "\"gs://cloud-ml-data/img/flower_photos/eval_set.csv\"", ")", "parser", ".", "add_argument", "(", "'--tpu_address'", ",", "help", "=", "'The path to the evaluation data'", ",", "type", "=", "str", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "'--hub_path'", ",", "help", "=", "'The path to TF Hub module to use in GCS'", ",", "type", "=", "str", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "'--job-dir'", ",", "help", "=", "'Directory where to save the given model'", ",", "type", "=", "str", ",", "required", "=", "True", ")", "return", "parser", ".", "parse_known_args", "(", "argv", ")" ]
[ 11, 0 ]
[ 42, 40 ]
python
en
['en', 'fr', 'en']
True
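What a call to `_parse_arguments` looks like with the flags defined above; the bucket and TPU names are placeholders.

args, _ = _parse_arguments([
    '--epochs', '3',
    '--steps_per_epoch', '100',
    '--tpu_address', 'my-tpu',                    # placeholder
    '--hub_path', 'gs://my-bucket/hub-module',    # placeholder
    '--job-dir', 'gs://my-bucket/flowers-job',    # placeholder
])
print(args.epochs, args.tpu_address, args.job_dir)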
main
()
Parses command line arguments and kicks off model training.
Parses command line arguments and kicks off model training.
def main(): """Parses command line arguments and kicks off model training.""" args = _parse_arguments(sys.argv[1:])[0] # TODO: define a TPU strategy resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=args.tpu_address) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.experimental.TPUStrategy(resolver) with strategy.scope(): train_data = util.load_dataset(args.train_path) eval_data = util.load_dataset(args.eval_path, training=False) image_model = model.build_model(args.job_dir, args.hub_path) model_history = model.train_and_evaluate( image_model, args.epochs, args.steps_per_epoch, train_data, eval_data, args.job_dir)
[ "def", "main", "(", ")", ":", "args", "=", "_parse_arguments", "(", "sys", ".", "argv", "[", "1", ":", "]", ")", "[", "0", "]", "# TODO: define a TPU strategy", "resolver", "=", "tf", ".", "distribute", ".", "cluster_resolver", ".", "TPUClusterResolver", "(", "tpu", "=", "args", ".", "tpu_address", ")", "tf", ".", "config", ".", "experimental_connect_to_cluster", "(", "resolver", ")", "tf", ".", "tpu", ".", "experimental", ".", "initialize_tpu_system", "(", "resolver", ")", "strategy", "=", "tf", ".", "distribute", ".", "experimental", ".", "TPUStrategy", "(", "resolver", ")", "with", "strategy", ".", "scope", "(", ")", ":", "train_data", "=", "util", ".", "load_dataset", "(", "args", ".", "train_path", ")", "eval_data", "=", "util", ".", "load_dataset", "(", "args", ".", "eval_path", ",", "training", "=", "False", ")", "image_model", "=", "model", ".", "build_model", "(", "args", ".", "job_dir", ",", "args", ".", "hub_path", ")", "model_history", "=", "model", ".", "train_and_evaluate", "(", "image_model", ",", "args", ".", "epochs", ",", "args", ".", "steps_per_epoch", ",", "train_data", ",", "eval_data", ",", "args", ".", "job_dir", ")" ]
[ 45, 0 ]
[ 63, 44 ]
python
en
['en', 'en', 'en']
True
_have_cython
()
Return True if Cython can be imported.
Return True if Cython can be imported.
def _have_cython(): """ Return True if Cython can be imported. """ cython_impl = 'Cython.Distutils.build_ext' try: # from (cython_impl) import build_ext __import__(cython_impl, fromlist=['build_ext']).build_ext return True except Exception: pass return False
[ "def", "_have_cython", "(", ")", ":", "cython_impl", "=", "'Cython.Distutils.build_ext'", "try", ":", "# from (cython_impl) import build_ext", "__import__", "(", "cython_impl", ",", "fromlist", "=", "[", "'build_ext'", "]", ")", ".", "build_ext", "return", "True", "except", "Exception", ":", "pass", "return", "False" ]
[ 11, 0 ]
[ 22, 16 ]
python
en
['en', 'error', 'th']
False
Extension._convert_pyx_sources_to_lang
(self)
Replace sources with .pyx extensions to sources with the target language extension. This mechanism allows language authors to supply pre-converted sources but to prefer the .pyx sources.
Replace sources with .pyx extensions to sources with the target language extension. This mechanism allows language authors to supply pre-converted sources but to prefer the .pyx sources.
def _convert_pyx_sources_to_lang(self): """ Replace sources with .pyx extensions to sources with the target language extension. This mechanism allows language authors to supply pre-converted sources but to prefer the .pyx sources. """ if _have_cython(): # the build has Cython, so allow it to compile the .pyx files return lang = self.language or '' target_ext = '.cpp' if lang.lower() == 'c++' else '.c' sub = functools.partial(re.sub, '.pyx$', target_ext) self.sources = list(map(sub, self.sources))
[ "def", "_convert_pyx_sources_to_lang", "(", "self", ")", ":", "if", "_have_cython", "(", ")", ":", "# the build has Cython, so allow it to compile the .pyx files", "return", "lang", "=", "self", ".", "language", "or", "''", "target_ext", "=", "'.cpp'", "if", "lang", ".", "lower", "(", ")", "==", "'c++'", "else", "'.c'", "sub", "=", "functools", ".", "partial", "(", "re", ".", "sub", ",", "'.pyx$'", ",", "target_ext", ")", "self", ".", "sources", "=", "list", "(", "map", "(", "sub", ",", "self", ".", "sources", ")", ")" ]
[ 40, 4 ]
[ 52, 51 ]
python
en
['en', 'error', 'th']
False
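A sketch of what `_convert_pyx_sources_to_lang` does in practice: with Cython installed the .pyx sources are kept for Cython to compile, otherwise they are rewritten to the pre-generated C/C++ file names.

from setuptools import Extension

ext = Extension('pkg.fast', sources=['pkg/fast.pyx'], language='c++')
# without Cython on the build machine, ext.sources becomes ['pkg/fast.cpp'];
# with Cython available it stays ['pkg/fast.pyx']
print(ext.sources)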
CrossValidation.__init__
(self, input_file, recommender, dir_folds, k_folds=10, header=None, sep='\t', write_predictions=False, write_sep='\t', recommender_verbose=False, evaluation_in_fold_verbose=True, metrics=None, as_table=False, table_sep='\t', del_folds=False, random_seed=None)
Cross Validation This strategy is responsible for dividing the database into K folds, in which each fold contains a train and a test set. It is also responsible for running and evaluating the recommender results in each fold and calculating the mean and the standard deviation. Usage: >> rec = MostPopular(as_binary=True) >> CrossValidation(db, rec, fold_d, evaluation_in_fold_verbose=False).compute() :param input_file: Database file :type input_file: str :param recommender: Initialize the recommender algorithm. e.g.: MostPopular(as_binary=True) :type recommender: class :param dir_folds: Directory to write folds (train and test files) :type dir_folds: str :param k_folds: How many folds the strategy will divide the database into :type k_folds: int, default 10 :param header: Skip header line :type header: int, default None :param sep: Delimiter for input files :type sep: str, default '\t' :param write_predictions: Write the recommender predictions in each fold :type write_predictions: bool, default False :param write_sep: Delimiter for output files :type write_sep: str, default '\t' :param recommender_verbose: Print header of recommender in each fold :type recommender_verbose: bool, default False :param evaluation_in_fold_verbose: Print evaluation of recommender in each fold :type evaluation_in_fold_verbose: bool, default True :param metrics: List of evaluation metrics :type metrics: str, default None :param as_table: Print the evaluation results as a table :type as_table: bool, default False :param table_sep: Delimiter for print results (only works with verbose=True and as_table=True) :type table_sep: str, default '\t' :param del_folds: Delete folds after evaluation :type del_folds: bool, default False :param random_seed: Random seed :type random_seed: int, default None
Cross Validation
def __init__(self, input_file, recommender, dir_folds, k_folds=10, header=None, sep='\t', write_predictions=False, write_sep='\t', recommender_verbose=False, evaluation_in_fold_verbose=True, metrics=None, as_table=False, table_sep='\t', del_folds=False, random_seed=None): """ Cross Validation This strategy is responsible to divide the database in K folds, in which each fold contain a train and a test set. Its also responsible to run and evaluate the recommender results in each fold and calculate the mean and the standard deviation. Usage: >> rec = MostPopular(as_binary=True) >> CrossValidation(db, rec, fold_d, evaluation_in_fold_verbose=False).compute() :param input_file: Database file :type input_file: str :param recommender: Initialize the recommender algorithm. e.g.: MostPopular(as_binary=True) :type recommender: class :param dir_folds: Directory to write folds (train and test files) :type dir_folds: str :param k_folds: How much folds the strategy will divide :type k_folds: int, default 10 :param header: Skip header line :type header: int, default None :param sep: Delimiter for input files :type sep: str, default '\t' :param write_predictions: Write the recommender predictions in each fold :type write_predictions: bool, default False :param write_sep: Delimiter for output files :type write_sep: str, default '\t' :param recommender_verbose: Print header of recommender in each fold :type recommender_verbose: bool, default False :param evaluation_in_fold_verbose: Print evaluation of recommender in each fold :type evaluation_in_fold_verbose: bool, default True :param metrics: List of evaluation metrics :type metrics: str, default None :param as_table: Print the evaluation results as table :type as_table: bool, default False :param table_sep: Delimiter for print results (only work with verbose=True and as_table=True) :type table_sep: str, default '\t' :param del_folds: Delete folds after evaluation :type del_folds: bool, default False :param random_seed: Random seed :type random_seed: int, default None """ self.input_file = input_file self.recommender = recommender self.dir_folds = dir_folds self.k_folds = k_folds self.header = header self.sep = sep self.write_predictions = write_predictions self.write_sep = write_sep self.recommender_verbose = recommender_verbose self.evaluation_in_fold_verbose = evaluation_in_fold_verbose self.metrics = metrics self.as_table = as_table self.table_sep = table_sep self.del_folds = del_folds self.random_seed = random_seed # internal vars self.folds_results = defaultdict(list)
[ "def", "__init__", "(", "self", ",", "input_file", ",", "recommender", ",", "dir_folds", ",", "k_folds", "=", "10", ",", "header", "=", "None", ",", "sep", "=", "'\\t'", ",", "write_predictions", "=", "False", ",", "write_sep", "=", "'\\t'", ",", "recommender_verbose", "=", "False", ",", "evaluation_in_fold_verbose", "=", "True", ",", "metrics", "=", "None", ",", "as_table", "=", "False", ",", "table_sep", "=", "'\\t'", ",", "del_folds", "=", "False", ",", "random_seed", "=", "None", ")", ":", "self", ".", "input_file", "=", "input_file", "self", ".", "recommender", "=", "recommender", "self", ".", "dir_folds", "=", "dir_folds", "self", ".", "k_folds", "=", "k_folds", "self", ".", "header", "=", "header", "self", ".", "sep", "=", "sep", "self", ".", "write_predictions", "=", "write_predictions", "self", ".", "write_sep", "=", "write_sep", "self", ".", "recommender_verbose", "=", "recommender_verbose", "self", ".", "evaluation_in_fold_verbose", "=", "evaluation_in_fold_verbose", "self", ".", "metrics", "=", "metrics", "self", ".", "as_table", "=", "as_table", "self", ".", "table_sep", "=", "table_sep", "self", ".", "del_folds", "=", "del_folds", "self", ".", "random_seed", "=", "random_seed", "# internal vars", "self", ".", "folds_results", "=", "defaultdict", "(", "list", ")" ]
[ 18, 4 ]
[ 96, 46 ]
python
en
['en', 'error', 'th']
False
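The usage quoted in the CrossValidation docstring, spelled out a little more fully; the input file and fold directory are placeholders, and both classes are assumed to be imported from CaseRecommender.

rec = MostPopular(as_binary=True)
CrossValidation(input_file='ratings.dat',
                recommender=rec,
                dir_folds='./experiment/',
                k_folds=5,
                evaluation_in_fold_verbose=False,
                del_folds=True,
                random_seed=42).compute()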
CrossValidation.generate_folds
(self)
Method to generate folds with k fold cross validation
Method to generate folds with k fold cross validation
def generate_folds(self): """ Method to generate folds with k fold cross validation """ SplitDatabase(input_file=self.input_file, n_splits=self.k_folds, dir_folds=self.dir_folds, sep_read=self.sep, header=self.header).kfoldcrossvalidation(random_state=self.random_seed)
[ "def", "generate_folds", "(", "self", ")", ":", "SplitDatabase", "(", "input_file", "=", "self", ".", "input_file", ",", "n_splits", "=", "self", ".", "k_folds", ",", "dir_folds", "=", "self", ".", "dir_folds", ",", "sep_read", "=", "self", ".", "sep", ",", "header", "=", "self", ".", "header", ")", ".", "kfoldcrossvalidation", "(", "random_state", "=", "self", ".", "random_seed", ")" ]
[ 98, 4 ]
[ 105, 112 ]
python
en
['en', 'error', 'th']
False
CrossValidation.execute_algorithm
(self)
Method to run recommender algorithm in k folds
Method to run recommender algorithm in k folds
def execute_algorithm(self): """ Method to run recommender algorithm in k folds """ for k in range(self.k_folds): train_file = self.dir_folds + 'folds/%d/train.dat' % k test_file = self.dir_folds + 'folds/%d/test.dat' % k self.recommender.train_file = train_file self.recommender.test_file = test_file if self.write_predictions: output_file = self.dir_folds + 'folds/%d/output.dat' % k self.recommender.output_file = output_file self.recommender.compute(verbose=self.recommender_verbose, verbose_evaluation=self.evaluation_in_fold_verbose, metrics=self.metrics) if self.metrics is None: self.metrics = self.recommender.evaluation_results.keys() for metric in self.metrics: self.folds_results[metric.upper()].append(self.recommender.evaluation_results[metric.upper()])
[ "def", "execute_algorithm", "(", "self", ")", ":", "for", "k", "in", "range", "(", "self", ".", "k_folds", ")", ":", "train_file", "=", "self", ".", "dir_folds", "+", "'folds/%d/train.dat'", "%", "k", "test_file", "=", "self", ".", "dir_folds", "+", "'folds/%d/test.dat'", "%", "k", "self", ".", "recommender", ".", "train_file", "=", "train_file", "self", ".", "recommender", ".", "test_file", "=", "test_file", "if", "self", ".", "write_predictions", ":", "output_file", "=", "self", ".", "dir_folds", "+", "'folds/%d/output.dat'", "%", "k", "self", ".", "recommender", ".", "output_file", "=", "output_file", "self", ".", "recommender", ".", "compute", "(", "verbose", "=", "self", ".", "recommender_verbose", ",", "verbose_evaluation", "=", "self", ".", "evaluation_in_fold_verbose", ",", "metrics", "=", "self", ".", "metrics", ")", "if", "self", ".", "metrics", "is", "None", ":", "self", ".", "metrics", "=", "self", ".", "recommender", ".", "evaluation_results", ".", "keys", "(", ")", "for", "metric", "in", "self", ".", "metrics", ":", "self", ".", "folds_results", "[", "metric", ".", "upper", "(", ")", "]", ".", "append", "(", "self", ".", "recommender", ".", "evaluation_results", "[", "metric", ".", "upper", "(", ")", "]", ")" ]
[ 107, 4 ]
[ 131, 110 ]
python
en
['en', 'error', 'th']
False
CrossValidation.evaluate
(self, verbose=True)
Method to evaluate folds results and generate mean and standard deviation :param verbose: If True, print evaluation results :type verbose: bool, default True
Method to evaluate folds results and generate mean and standard deviation
def evaluate(self, verbose=True): """ Method to evaluate folds results and generate mean and standard deviation :param verbose: If True, print evaluation results :type verbose: bool, default True """ mean_dict = defaultdict(dict) std_dict = defaultdict(dict) for metric in self.metrics: mean_dict[metric.upper()] = np.mean(self.folds_results[metric.upper()]) std_dict[metric.upper()] = np.std(self.folds_results[metric.upper()]) if verbose: if self.as_table: header = '' values_mean = '' values_std = '' for metric in self.metrics: header += metric.upper() + self.table_sep values_mean += str(round(mean_dict[metric.upper()], 6)) + self.table_sep values_std += str(round(std_dict[metric.upper()], 6)) + self.table_sep print('Metric%s%s' % (self.table_sep, header)) print('Mean%s%s' % (self.table_sep, values_mean)) print('STD%s%s' % (self.table_sep, values_std)) else: evaluation_mean = 'Mean:: ' evaluation_std = 'STD:: ' for metrics in self.metrics: evaluation_mean += "%s: %.6f " % (metrics.upper(), mean_dict[metrics.upper()]) evaluation_std += "%s: %.6f " % (metrics.upper(), std_dict[metrics.upper()]) print(evaluation_mean) print(evaluation_std)
[ "def", "evaluate", "(", "self", ",", "verbose", "=", "True", ")", ":", "mean_dict", "=", "defaultdict", "(", "dict", ")", "std_dict", "=", "defaultdict", "(", "dict", ")", "for", "metric", "in", "self", ".", "metrics", ":", "mean_dict", "[", "metric", ".", "upper", "(", ")", "]", "=", "np", ".", "mean", "(", "self", ".", "folds_results", "[", "metric", ".", "upper", "(", ")", "]", ")", "std_dict", "[", "metric", ".", "upper", "(", ")", "]", "=", "np", ".", "std", "(", "self", ".", "folds_results", "[", "metric", ".", "upper", "(", ")", "]", ")", "if", "verbose", ":", "if", "self", ".", "as_table", ":", "header", "=", "''", "values_mean", "=", "''", "values_std", "=", "''", "for", "metric", "in", "self", ".", "metrics", ":", "header", "+=", "metric", ".", "upper", "(", ")", "+", "self", ".", "table_sep", "values_mean", "+=", "str", "(", "round", "(", "mean_dict", "[", "metric", ".", "upper", "(", ")", "]", ",", "6", ")", ")", "+", "self", ".", "table_sep", "values_std", "+=", "str", "(", "round", "(", "std_dict", "[", "metric", ".", "upper", "(", ")", "]", ",", "6", ")", ")", "+", "self", ".", "table_sep", "print", "(", "'Metric%s%s'", "%", "(", "self", ".", "table_sep", ",", "header", ")", ")", "print", "(", "'Mean%s%s'", "%", "(", "self", ".", "table_sep", ",", "values_mean", ")", ")", "print", "(", "'STD%s%s'", "%", "(", "self", ".", "table_sep", ",", "values_std", ")", ")", "else", ":", "evaluation_mean", "=", "'Mean:: '", "evaluation_std", "=", "'STD:: '", "for", "metrics", "in", "self", ".", "metrics", ":", "evaluation_mean", "+=", "\"%s: %.6f \"", "%", "(", "metrics", ".", "upper", "(", ")", ",", "mean_dict", "[", "metrics", ".", "upper", "(", ")", "]", ")", "evaluation_std", "+=", "\"%s: %.6f \"", "%", "(", "metrics", ".", "upper", "(", ")", ",", "std_dict", "[", "metrics", ".", "upper", "(", ")", "]", ")", "print", "(", "evaluation_mean", ")", "print", "(", "evaluation_std", ")" ]
[ 133, 4 ]
[ 168, 37 ]
python
en
['en', 'error', 'th']
False
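To make the fold aggregation concrete, here is a minimal standalone sketch of what evaluate() computes, using made-up per-fold values (the metric names and numbers are hypothetical): a per-metric mean and standard deviation over the k folds, printed in the same Mean::/STD:: style.

import numpy as np

# Hypothetical per-fold results keyed by metric, as collected in folds_results.
folds_results = {'MAE': [0.72, 0.69, 0.71], 'RMSE': [0.93, 0.90, 0.92]}

mean_dict = {m: np.mean(v) for m, v in folds_results.items()}
std_dict = {m: np.std(v) for m, v in folds_results.items()}

print('Mean:: ' + ' '.join('%s: %.6f' % (m, mean_dict[m]) for m in folds_results))
print('STD:: ' + ' '.join('%s: %.6f' % (m, std_dict[m]) for m in folds_results))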
CrossValidation.erase_folds
(self)
Method to delete folds after evaluation
Method to delete folds after evaluation
def erase_folds(self): """ Method to delete folds after evaluation """ folds = self.dir_folds + 'folds/' shutil.rmtree(folds)
[ "def", "erase_folds", "(", "self", ")", ":", "folds", "=", "self", ".", "dir_folds", "+", "'folds/'", "shutil", ".", "rmtree", "(", "folds", ")" ]
[ 170, 4 ]
[ 177, 28 ]
python
en
['en', 'error', 'th']
False
CrossValidation.compute
(self, verbose=True)
Method to run the cross validation :param verbose: If True, print header :type verbose: bool, default True
Method to run the cross validation
def compute(self, verbose=True): """ Method to run the cross validation :param verbose: If True, print header :type verbose: bool, default True """ if verbose: print("[Case Recommender: Cross Validation]\n") print("Database:: %s \nRecommender Algorithm:: %s | K Folds: %d\n" % (self.input_file, self.recommender.recommender_name, self.k_folds)) self.generate_folds() self.execute_algorithm() self.evaluate(verbose) if self.del_folds: self.erase_folds()
[ "def", "compute", "(", "self", ",", "verbose", "=", "True", ")", ":", "if", "verbose", ":", "print", "(", "\"[Case Recommender: Cross Validation]\\n\"", ")", "print", "(", "\"Database:: %s \\nRecommender Algorithm:: %s | K Folds: %d\\n\"", "%", "(", "self", ".", "input_file", ",", "self", ".", "recommender", ".", "recommender_name", ",", "self", ".", "k_folds", ")", ")", "self", ".", "generate_folds", "(", ")", "self", ".", "execute_algorithm", "(", ")", "self", ".", "evaluate", "(", "verbose", ")", "if", "self", ".", "del_folds", ":", "self", ".", "erase_folds", "(", ")" ]
[ 179, 4 ]
[ 200, 30 ]
python
en
['en', 'error', 'th']
False
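A hedged usage sketch of the whole compute() pipeline follows. The constructor arguments (input_file, recommender, dir_folds, k_folds) are inferred from the attributes referenced in the code above, the import paths follow Case Recommender's usual layout, and the input path is hypothetical; check the library documentation before relying on the exact signature.

from caserec.recommenders.rating_prediction.userknn import UserKNN
from caserec.utils.cross_validation import CrossValidation

db = 'ratings.dat'        # hypothetical input file: user \t item \t rating
recommender = UserKNN()   # any recommender exposing train_file/test_file/compute()

CrossValidation(input_file=db, recommender=recommender,
                dir_folds='./', k_folds=5).compute()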
Greatest.as_sqlite
(self, compiler, connection, **extra_context)
Use the MAX function on SQLite.
Use the MAX function on SQLite.
def as_sqlite(self, compiler, connection, **extra_context): """Use the MAX function on SQLite.""" return super().as_sqlite(compiler, connection, function='MAX', **extra_context)
[ "def", "as_sqlite", "(", "self", ",", "compiler", ",", "connection", ",", "*", "*", "extra_context", ")", ":", "return", "super", "(", ")", ".", "as_sqlite", "(", "compiler", ",", "connection", ",", "function", "=", "'MAX'", ",", "*", "*", "extra_context", ")" ]
[ 105, 4 ]
[ 107, 87 ]
python
en
['en', 'en', 'en']
True
Least.as_sqlite
(self, compiler, connection, **extra_context)
Use the MIN function on SQLite.
Use the MIN function on SQLite.
def as_sqlite(self, compiler, connection, **extra_context): """Use the MIN function on SQLite.""" return super().as_sqlite(compiler, connection, function='MIN', **extra_context)
[ "def", "as_sqlite", "(", "self", ",", "compiler", ",", "connection", ",", "*", "*", "extra_context", ")", ":", "return", "super", "(", ")", ".", "as_sqlite", "(", "compiler", ",", "connection", ",", "function", "=", "'MIN'", ",", "*", "*", "extra_context", ")" ]
[ 165, 4 ]
[ 167, 87 ]
python
en
['en', 'en', 'en']
True
UserKNN.__init__
(self, train_file=None, test_file=None, output_file=None, similarity_metric="cosine", k_neighbors=None, as_similar_first=False, sep='\t', output_sep='\t')
UserKNN for rating prediction This algorithm predicts ratings for each user based on the items that its neighbors (similar users) consumed. Usage:: >> UserKNN(train, test).compute() >> UserKNN(train, test, ranking_file, as_similar_first=True, k_neighbors=60).compute() :param train_file: File which contains the train set. This file needs to have at least 3 columns (user item feedback_value). :type train_file: str :param test_file: File which contains the test set. This file needs to have at least 3 columns (user item feedback_value). :type test_file: str, default None :param output_file: File with dir to write the final predictions :type output_file: str, default None :param similarity_metric: Pairwise metric to compute the similarity between the users. Reference about distances: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.pdist.html :type similarity_metric: str, default cosine :param k_neighbors: Number of neighbors to use. If None, k_neighbors = int(sqrt(n_users)) :type k_neighbors: int, default None :param as_similar_first: If True, for each unknown item to be predicted, we first look for its k most similar users and then take the intersection with the users that have seen that item. :type as_similar_first: bool, default False :param sep: Delimiter for input files :type sep: str, default '\t' :param output_sep: Delimiter for output file :type output_sep: str, default '\t'
UserKNN for rating prediction
def __init__(self, train_file=None, test_file=None, output_file=None, similarity_metric="cosine", k_neighbors=None, as_similar_first=False, sep='\t', output_sep='\t'): """ UserKNN for rating prediction This algorithm predicts ratings for each user based on the similar items that his neighbors (similar users) consumed. Usage:: >> UserKNN(train, test).compute() >> UserKNN(train, test, ranking_file, as_similar_first=True, k_neighbors=60).compute() :param train_file: File which contains the train set. This file needs to have at least 3 columns (user item feedback_value). :type train_file: str :param test_file: File which contains the test set. This file needs to have at least 3 columns (user item feedback_value). :type test_file: str, default None :param output_file: File with dir to write the final predictions :type output_file: str, default None :param similarity_metric: Pairwise metric to compute the similarity between the users. Reference about distances: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.pdist.html :type similarity_metric: str, default cosine :param k_neighbors: Number of neighbors to use. If None, k_neighbor = int(sqrt(n_users)) :type k_neighbors: int, default None :param as_similar_first: If True, for each unknown item, which will be predicted, we first look for its k most similar users and then take the intersection with the users that seen that item. :type as_similar_first: bool, default False :param sep: Delimiter for input files :type sep: str, default '\t' :param output_sep: Delimiter for output file :type output_sep: str, default '\t' """ super(UserKNN, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file, similarity_metric=similarity_metric, sep=sep, output_sep=output_sep) self.recommender_name = 'UserKNN Algorithm' self.as_similar_first = as_similar_first self.k_neighbors = k_neighbors # internal vars self.su_matrix = None self.users_id_viewed_item = None
[ "def", "__init__", "(", "self", ",", "train_file", "=", "None", ",", "test_file", "=", "None", ",", "output_file", "=", "None", ",", "similarity_metric", "=", "\"cosine\"", ",", "k_neighbors", "=", "None", ",", "as_similar_first", "=", "False", ",", "sep", "=", "'\\t'", ",", "output_sep", "=", "'\\t'", ")", ":", "super", "(", "UserKNN", ",", "self", ")", ".", "__init__", "(", "train_file", "=", "train_file", ",", "test_file", "=", "test_file", ",", "output_file", "=", "output_file", ",", "similarity_metric", "=", "similarity_metric", ",", "sep", "=", "sep", ",", "output_sep", "=", "output_sep", ")", "self", ".", "recommender_name", "=", "'UserKNN Algorithm'", "self", ".", "as_similar_first", "=", "as_similar_first", "self", ".", "k_neighbors", "=", "k_neighbors", "# internal vars", "self", ".", "su_matrix", "=", "None", "self", ".", "users_id_viewed_item", "=", "None" ]
[ 24, 4 ]
[ 77, 40 ]
python
en
['en', 'error', 'th']
False
UserKNN.init_model
(self)
Method to initialize the model. Compute similarity matrix based on user (user x user)
Method to initialize the model. Compute similarity matrix based on user (user x user)
def init_model(self): """ Method to initialize the model. Compute similarity matrix based on user (user x user) """ super(UserKNN, self).init_model() self.users_id_viewed_item = {} # Set the value for k if self.k_neighbors is None: self.k_neighbors = int(np.sqrt(len(self.users))) self.su_matrix = self.compute_similarity(transpose=False) # Map the users which seen an item with their respective ids for item in self.items: for user in self.train_set['users_viewed_item'].get(item, []): self.users_id_viewed_item.setdefault(item, []).append(self.user_to_user_id[user])
[ "def", "init_model", "(", "self", ")", ":", "super", "(", "UserKNN", ",", "self", ")", ".", "init_model", "(", ")", "self", ".", "users_id_viewed_item", "=", "{", "}", "# Set the value for k", "if", "self", ".", "k_neighbors", "is", "None", ":", "self", ".", "k_neighbors", "=", "int", "(", "np", ".", "sqrt", "(", "len", "(", "self", ".", "users", ")", ")", ")", "self", ".", "su_matrix", "=", "self", ".", "compute_similarity", "(", "transpose", "=", "False", ")", "# Map the users which seen an item with their respective ids", "for", "item", "in", "self", ".", "items", ":", "for", "user", "in", "self", ".", "train_set", "[", "'users_viewed_item'", "]", ".", "get", "(", "item", ",", "[", "]", ")", ":", "self", ".", "users_id_viewed_item", ".", "setdefault", "(", "item", ",", "[", "]", ")", ".", "append", "(", "self", ".", "user_to_user_id", "[", "user", "]", ")" ]
[ 79, 4 ]
[ 98, 97 ]
python
en
['en', 'error', 'th']
False
UserKNN.predict
(self)
Method to predict ratings for all known users in the train set.
Method to predict ratings for all known users in the train set.
def predict(self): """ Method to predict ratings for all known users in the train set. """ for user in self.users: if len(self.train_set['feedback'].get(user, [])) != 0: if self.test_file is not None: if self.as_similar_first: self.predictions += self.predict_similar_first_scores(user, self.test_set['items_seen_by_user'] .get(user, [])) else: self.predictions += self.predict_scores(user, self.test_set['items_seen_by_user'].get(user, [])) else: # Selects items that user has not interacted with. items_seen_by_user = [] u_list = list(np.flatnonzero(self.matrix[self.user_to_user_id[user]] == 0)) for item_id in u_list: items_seen_by_user.append(self.item_id_to_item[item_id]) if self.as_similar_first: self.predictions += self.predict_similar_first_scores(user, items_seen_by_user) else: self.predictions += self.predict_scores(user, items_seen_by_user) else: # Implement cold start user pass
[ "def", "predict", "(", "self", ")", ":", "for", "user", "in", "self", ".", "users", ":", "if", "len", "(", "self", ".", "train_set", "[", "'feedback'", "]", ".", "get", "(", "user", ",", "[", "]", ")", ")", "!=", "0", ":", "if", "self", ".", "test_file", "is", "not", "None", ":", "if", "self", ".", "as_similar_first", ":", "self", ".", "predictions", "+=", "self", ".", "predict_similar_first_scores", "(", "user", ",", "self", ".", "test_set", "[", "'items_seen_by_user'", "]", ".", "get", "(", "user", ",", "[", "]", ")", ")", "else", ":", "self", ".", "predictions", "+=", "self", ".", "predict_scores", "(", "user", ",", "self", ".", "test_set", "[", "'items_seen_by_user'", "]", ".", "get", "(", "user", ",", "[", "]", ")", ")", "else", ":", "# Selects items that user has not interacted with.", "items_seen_by_user", "=", "[", "]", "u_list", "=", "list", "(", "np", ".", "flatnonzero", "(", "self", ".", "matrix", "[", "self", ".", "user_to_user_id", "[", "user", "]", "]", "==", "0", ")", ")", "for", "item_id", "in", "u_list", ":", "items_seen_by_user", ".", "append", "(", "self", ".", "item_id_to_item", "[", "item_id", "]", ")", "if", "self", ".", "as_similar_first", ":", "self", ".", "predictions", "+=", "self", ".", "predict_similar_first_scores", "(", "user", ",", "items_seen_by_user", ")", "else", ":", "self", ".", "predictions", "+=", "self", ".", "predict_scores", "(", "user", ",", "items_seen_by_user", ")", "else", ":", "# Implement cold start user", "pass" ]
[ 100, 4 ]
[ 127, 20 ]
python
en
['en', 'error', 'th']
False
UserKNN.predict_scores
(self, user, unpredicted_items)
In this implementation, for each unknown item to be predicted, we first look for users that have seen that item and calculate the similarity between them and the target user. Then we sort these similarities and keep the k most similar. Finally, the score of the unknown item is computed from these similarities as rui = bui + (sum((rvi - bvi) * sim(u,v)) / sum(sim(u,v))) :param user: User :type user: int :param unpredicted_items: A list of unknown items for each user :type unpredicted_items: list :return: Sorted list with triples user item rating :rtype: list
In this implementation, for each unknown item to be predicted, we first look for users that have seen that item and calculate the similarity between them and the target user. Then we sort these similarities and keep the k most similar. Finally, the score of the unknown item is computed from these similarities.
def predict_scores(self, user, unpredicted_items): """ In this implementation, for each unknown item, which will be predicted, we first look for users that seen that item and calculate the similarity between them and the user. Then we sort these similarities and get the most similar k's. Finally, the score of the unknown item will be the sum of the similarities. rui = bui + (sum((rvi - bvi) * sim(u,v)) / sum(sim(u,v))) :param user: User :type user: int :param unpredicted_items: A list of unknown items for each user :type unpredicted_items: list :return: Sorted list with triples user item rating :rtype: list """ u_id = self.user_to_user_id[user] predictions = [] for item in unpredicted_items: neighbors = [] rui = 0 sim_sum = 0 for user_v_id in self.users_id_viewed_item.get(item, []): user_v = self.user_id_to_user[user_v_id] neighbors.append((user_v, self.su_matrix[u_id, user_v_id], self.train_set['feedback'][user_v][item])) neighbors = sorted(neighbors, key=lambda x: -x[1]) if neighbors: for triple in neighbors[:self.k_neighbors]: rui += (triple[2] - self.bui[triple[0]][item]) * triple[1] if triple[1] != 0 else 0.001 sim_sum += triple[1] if triple[1] != 0 else 0.001 rui = self.bui[user][item] + (rui / sim_sum) else: rui = self.bui[user][item] # normalize the ratings based on the highest and lowest value. if rui > self.train_set["max_value"]: rui = self.train_set["max_value"] if rui < self.train_set["min_value"]: rui = self.train_set["min_value"] predictions.append((user, item, rui)) return sorted(predictions, key=lambda x: x[1])
[ "def", "predict_scores", "(", "self", ",", "user", ",", "unpredicted_items", ")", ":", "u_id", "=", "self", ".", "user_to_user_id", "[", "user", "]", "predictions", "=", "[", "]", "for", "item", "in", "unpredicted_items", ":", "neighbors", "=", "[", "]", "rui", "=", "0", "sim_sum", "=", "0", "for", "user_v_id", "in", "self", ".", "users_id_viewed_item", ".", "get", "(", "item", ",", "[", "]", ")", ":", "user_v", "=", "self", ".", "user_id_to_user", "[", "user_v_id", "]", "neighbors", ".", "append", "(", "(", "user_v", ",", "self", ".", "su_matrix", "[", "u_id", ",", "user_v_id", "]", ",", "self", ".", "train_set", "[", "'feedback'", "]", "[", "user_v", "]", "[", "item", "]", ")", ")", "neighbors", "=", "sorted", "(", "neighbors", ",", "key", "=", "lambda", "x", ":", "-", "x", "[", "1", "]", ")", "if", "neighbors", ":", "for", "triple", "in", "neighbors", "[", ":", "self", ".", "k_neighbors", "]", ":", "rui", "+=", "(", "triple", "[", "2", "]", "-", "self", ".", "bui", "[", "triple", "[", "0", "]", "]", "[", "item", "]", ")", "*", "triple", "[", "1", "]", "if", "triple", "[", "1", "]", "!=", "0", "else", "0.001", "sim_sum", "+=", "triple", "[", "1", "]", "if", "triple", "[", "1", "]", "!=", "0", "else", "0.001", "rui", "=", "self", ".", "bui", "[", "user", "]", "[", "item", "]", "+", "(", "rui", "/", "sim_sum", ")", "else", ":", "rui", "=", "self", ".", "bui", "[", "user", "]", "[", "item", "]", "# normalize the ratings based on the highest and lowest value.", "if", "rui", ">", "self", ".", "train_set", "[", "\"max_value\"", "]", ":", "rui", "=", "self", ".", "train_set", "[", "\"max_value\"", "]", "if", "rui", "<", "self", ".", "train_set", "[", "\"min_value\"", "]", ":", "rui", "=", "self", ".", "train_set", "[", "\"min_value\"", "]", "predictions", ".", "append", "(", "(", "user", ",", "item", ",", "rui", ")", ")", "return", "sorted", "(", "predictions", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")" ]
[ 129, 4 ]
[ 179, 54 ]
python
en
['en', 'error', 'th']
False
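A self-contained numeric sketch of the prediction rule stated in the docstring, using toy values rather than the library's data structures: rui = bui + sum((rvi - bvi) * sim(u,v)) / sum(sim(u,v)), restricted to the k most similar users that rated the item and clamped to an assumed 1-5 rating scale.

bui = 3.6                      # baseline estimate b_ui for the target (user, item)
neighbors = [                  # (sim(u, v), rating r_vi, baseline b_vi) of users who rated the item
    (0.9, 4.0, 3.5),
    (0.7, 5.0, 4.2),
    (0.4, 2.0, 3.0),
]
k = 2
top = sorted(neighbors, key=lambda x: -x[0])[:k]   # keep the k most similar raters

num = sum(sim * (r - b) for sim, r, b in top)
den = sum(sim for sim, _, _ in top)
rui = bui + (num / den if den else 0.0)
rui = min(max(rui, 1.0), 5.0)   # clamp, as the code does with min_value/max_value
print(round(rui, 2))            # 4.23 for these toy numbers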
UserKNN.predict_similar_first_scores
(self, user, unpredicted_items)
In this implementation, for each unknown item to be predicted, we first look for its k most similar users and then take the intersection with the users that have seen that item. Finally, the score of the unknown item is computed from these similarities as rui = bui + (sum((rvi - bvi) * sim(u,v)) / sum(sim(u,v))) :param user: User :type user: int :param unpredicted_items: A list of unknown items for each user :type unpredicted_items: list :return: Sorted list with triples user item rating :rtype: list
In this implementation, for each unknown item to be predicted, we first look for its k most similar users and then take the intersection with the users that have seen that item. Finally, the score of the unknown item is computed from these similarities.
def predict_similar_first_scores(self, user, unpredicted_items): """ In this implementation, for each unknown item, which will be predicted, we first look for its k most similar users and then take the intersection with the users that seen that item. Finally, the score of the unknown item will be the sum of the similarities. rui = bui + (sum((rvi - bvi) * sim(u,v)) / sum(sim(u,v))) :param user: User :type user: int :param unpredicted_items: A list of unknown items for each user :type unpredicted_items: list :return: Sorted list with triples user item rating :rtype: list """ u_id = self.user_to_user_id[user] predictions = [] # Select user neighbors, sorting user similarity vector. Returns a list with index of sorting values neighbors = sorted(range(len(self.su_matrix[u_id])), key=lambda m: -self.su_matrix[u_id][m]) for item in unpredicted_items: rui = 0 sim_sum = 0 # Intersection bt. the neighbors closest to the user and the users who accessed the unknown item. common_users = list(set( self.users_id_viewed_item.get(item, [])).intersection(neighbors[1:self.k_neighbors])) if common_users: for user_v_id in common_users: user_v = self.user_id_to_user[user_v_id] sim_uv = self.su_matrix[u_id, user_v_id] rui += (self.train_set['feedback'][user_v][item] - self.bui[user_v][item]) * \ sim_uv if sim_sum != 0 else 0.001 sim_sum += sim_uv if sim_sum != 0 else 0.001 rui = self.bui[user][item] + (rui / sim_sum) else: rui = self.bui[user][item] # normalize the ratings based on the highest and lowest value. if rui > self.train_set["max_value"]: rui = self.train_set["max_value"] if rui < self.train_set["min_value"]: rui = self.train_set["min_value"] predictions.append((user, item, rui)) return sorted(predictions, key=lambda x: x[1])
[ "def", "predict_similar_first_scores", "(", "self", ",", "user", ",", "unpredicted_items", ")", ":", "u_id", "=", "self", ".", "user_to_user_id", "[", "user", "]", "predictions", "=", "[", "]", "# Select user neighbors, sorting user similarity vector. Returns a list with index of sorting values", "neighbors", "=", "sorted", "(", "range", "(", "len", "(", "self", ".", "su_matrix", "[", "u_id", "]", ")", ")", ",", "key", "=", "lambda", "m", ":", "-", "self", ".", "su_matrix", "[", "u_id", "]", "[", "m", "]", ")", "for", "item", "in", "unpredicted_items", ":", "rui", "=", "0", "sim_sum", "=", "0", "# Intersection bt. the neighbors closest to the user and the users who accessed the unknown item.", "common_users", "=", "list", "(", "set", "(", "self", ".", "users_id_viewed_item", ".", "get", "(", "item", ",", "[", "]", ")", ")", ".", "intersection", "(", "neighbors", "[", "1", ":", "self", ".", "k_neighbors", "]", ")", ")", "if", "common_users", ":", "for", "user_v_id", "in", "common_users", ":", "user_v", "=", "self", ".", "user_id_to_user", "[", "user_v_id", "]", "sim_uv", "=", "self", ".", "su_matrix", "[", "u_id", ",", "user_v_id", "]", "rui", "+=", "(", "self", ".", "train_set", "[", "'feedback'", "]", "[", "user_v", "]", "[", "item", "]", "-", "self", ".", "bui", "[", "user_v", "]", "[", "item", "]", ")", "*", "sim_uv", "if", "sim_sum", "!=", "0", "else", "0.001", "sim_sum", "+=", "sim_uv", "if", "sim_sum", "!=", "0", "else", "0.001", "rui", "=", "self", ".", "bui", "[", "user", "]", "[", "item", "]", "+", "(", "rui", "/", "sim_sum", ")", "else", ":", "rui", "=", "self", ".", "bui", "[", "user", "]", "[", "item", "]", "# normalize the ratings based on the highest and lowest value.", "if", "rui", ">", "self", ".", "train_set", "[", "\"max_value\"", "]", ":", "rui", "=", "self", ".", "train_set", "[", "\"max_value\"", "]", "if", "rui", "<", "self", ".", "train_set", "[", "\"min_value\"", "]", ":", "rui", "=", "self", ".", "train_set", "[", "\"min_value\"", "]", "predictions", ".", "append", "(", "(", "user", ",", "item", ",", "rui", ")", ")", "return", "sorted", "(", "predictions", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")" ]
[ 181, 4 ]
[ 234, 54 ]
python
en
['en', 'error', 'th']
False
UserKNN.compute
(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t')
Extends compute method from BaseItemRecommendation. Method to run recommender algorithm :param verbose: Print recommender and database information :type verbose: bool, default True :param metrics: List of evaluation metrics :type metrics: list, default None :param verbose_evaluation: Print the evaluation results :type verbose_evaluation: bool, default True :param as_table: Print the evaluation results as a table :type as_table: bool, default False :param table_sep: Delimiter for printed results (only works with verbose=True and as_table=True) :type table_sep: str, default '\t'
Extends compute method from BaseItemRecommendation. Method to run recommender algorithm
def compute(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t'): """ Extends compute method from BaseItemRecommendation. Method to run recommender algorithm :param verbose: Print recommender and database information :type verbose: bool, default True :param metrics: List of evaluation metrics :type metrics: list, default None :param verbose_evaluation: Print the evaluation results :type verbose_evaluation: bool, default True :param as_table: Print the evaluation results as table :type as_table: bool, default False :param table_sep: Delimiter for print results (only work with verbose=True and as_table=True) :type table_sep: str, default '\t' """ super(UserKNN, self).compute(verbose=verbose) if verbose: self.init_model() print("training_time:: %4f sec" % timed(self.train_baselines)) if self.extra_info_header is not None: print(self.extra_info_header) print("prediction_time:: %4f sec" % timed(self.predict)) else: # Execute all in silence without prints self.extra_info_header = None self.init_model() self.train_baselines() self.predict() self.write_predictions() if self.test_file is not None: self.evaluate(metrics, verbose_evaluation, as_table=as_table, table_sep=table_sep)
[ "def", "compute", "(", "self", ",", "verbose", "=", "True", ",", "metrics", "=", "None", ",", "verbose_evaluation", "=", "True", ",", "as_table", "=", "False", ",", "table_sep", "=", "'\\t'", ")", ":", "super", "(", "UserKNN", ",", "self", ")", ".", "compute", "(", "verbose", "=", "verbose", ")", "if", "verbose", ":", "self", ".", "init_model", "(", ")", "print", "(", "\"training_time:: %4f sec\"", "%", "timed", "(", "self", ".", "train_baselines", ")", ")", "if", "self", ".", "extra_info_header", "is", "not", "None", ":", "print", "(", "self", ".", "extra_info_header", ")", "print", "(", "\"prediction_time:: %4f sec\"", "%", "timed", "(", "self", ".", "predict", ")", ")", "else", ":", "# Execute all in silence without prints", "self", ".", "extra_info_header", "=", "None", "self", ".", "init_model", "(", ")", "self", ".", "train_baselines", "(", ")", "self", ".", "predict", "(", ")", "self", ".", "write_predictions", "(", ")", "if", "self", ".", "test_file", "is", "not", "None", ":", "self", ".", "evaluate", "(", "metrics", ",", "verbose_evaluation", ",", "as_table", "=", "as_table", ",", "table_sep", "=", "table_sep", ")" ]
[ 236, 4 ]
[ 276, 94 ]
python
en
['en', 'error', 'th']
False
get_all_headers
(message, key)
Given an HTTPMessage, return all headers matching a given key.
Given an HTTPMessage, return all headers matching a given key.
def get_all_headers(message, key): """ Given an HTTPMessage, return all headers matching a given key. """ return message.get_all(key)
[ "def", "get_all_headers", "(", "message", ",", "key", ")", ":", "return", "message", ".", "get_all", "(", "key", ")" ]
[ 9, 0 ]
[ 13, 31 ]
python
en
['en', 'error', 'th']
False
_convert_vcf_to_table
(vcf_filename: Path)
Converts all records in a vcf file into a list of dictionaries.
Converts all records in a vcf file into a list of dictionaries.
def _convert_vcf_to_table(vcf_filename: Path) -> List[Dict[str, Any]]: """Converts all records in a vcf file into a list of dictionaries.""" table: List[Dict[str, str]] = list() seen_positions = set() with vcf_filename.open('r') as file1: vcf_reader = vcf.Reader(file1) for record in vcf_reader: data = _convert_record_to_dictionary(record) # VCF files sometimes record separate mutations as occuring at the same position. # The gd file will instead increment the second mutations position by 1. Do this to maintain compatibility. if (data['seq id'], data['position']) in seen_positions: data['position'] += 1 seen_positions.add((data['seq id'], data['position'])) table.append(data) return table
[ "def", "_convert_vcf_to_table", "(", "vcf_filename", ":", "Path", ")", "->", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", ":", "table", ":", "List", "[", "Dict", "[", "str", ",", "str", "]", "]", "=", "list", "(", ")", "seen_positions", "=", "set", "(", ")", "with", "vcf_filename", ".", "open", "(", "'r'", ")", "as", "file1", ":", "vcf_reader", "=", "vcf", ".", "Reader", "(", "file1", ")", "for", "record", "in", "vcf_reader", ":", "data", "=", "_convert_record_to_dictionary", "(", "record", ")", "# VCF files sometimes record separate mutations as occuring at the same position.", "# The gd file will instead increment the second mutations position by 1. Do this to maintain compatibility.", "if", "(", "data", "[", "'seq id'", "]", ",", "data", "[", "'position'", "]", ")", "in", "seen_positions", ":", "data", "[", "'position'", "]", "+=", "1", "seen_positions", ".", "add", "(", "(", "data", "[", "'seq id'", "]", ",", "data", "[", "'position'", "]", ")", ")", "table", ".", "append", "(", "data", ")", "return", "table" ]
[ 36, 0 ]
[ 50, 13 ]
python
en
['en', 'en', 'en']
True
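A small standalone sketch of the compatibility tweak described in the comments, using toy dictionaries instead of PyVCF records: when a second record lands on an already-seen (seq id, position) pair, its position is bumped by 1, mirroring how the .gd files represent co-located mutations.

records = [
    {'seq id': 'chr1', 'position': 100},
    {'seq id': 'chr1', 'position': 100},   # same site as the previous record
    {'seq id': 'chr1', 'position': 250},
]

seen_positions, table = set(), []
for data in records:
    if (data['seq id'], data['position']) in seen_positions:
        data['position'] += 1
    seen_positions.add((data['seq id'], data['position']))
    table.append(data)

print([row['position'] for row in table])   # [100, 101, 250]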
parse_vcf_file
(filename: Path, set_index: bool = True)
Converts the VCF file generated by breseq into a pandas Dataframe. Parameters ---------- filename: Path Either a folder containing a single breseq run or a path to the vcf file itself. set_index:bool; default True Whether to set the index of the dataframe. Returns ------- pandas.DataFrame - Index -> (VCFColumns.sample_name, VCFColumns.sequence_id, VCFColumns.position) - Columns-> VCFColumns
Converts the VCF file generated by breseq into a pandas Dataframe. Parameters ---------- filename: Path Either a folder containing a single breseq run or a path to the vcf file itself. set_index:bool; default True Whether to set the index of the dataframe. Returns ------- pandas.DataFrame - Index -> (VCFColumns.sample_name, VCFColumns.sequence_id, VCFColumns.position) - Columns-> VCFColumns
def parse_vcf_file(filename: Path, set_index: bool = True) -> pandas.DataFrame: """ Converts the VCF file generated by breseq into a pandas Dataframe. Parameters ---------- filename: Path Either a folder containing a single breseq run or a path to the vcf file itself. set_index:bool; default True Whether to set the index of the dataframe. Returns ------- pandas.DataFrame - Index -> (VCFColumns.sample_name, VCFColumns.sequence_id, VCFColumns.position) - Columns-> VCFColumns """ table = _convert_vcf_to_table(filename) # Columns are defined in VCFColumns vcf_df: pandas.DataFrame = pandas.DataFrame(table) # Order the columns correctly filtered_df = vcf_df[list(VCFColumns)] if set_index: filtered_df.set_index(keys = [VCFColumns.sequence_id, VCFColumns.position], inplace = True) return filtered_df
[ "def", "parse_vcf_file", "(", "filename", ":", "Path", ",", "set_index", ":", "bool", "=", "True", ")", "->", "pandas", ".", "DataFrame", ":", "table", "=", "_convert_vcf_to_table", "(", "filename", ")", "# Columns are defined in VCFColumns", "vcf_df", ":", "pandas", ".", "DataFrame", "=", "pandas", ".", "DataFrame", "(", "table", ")", "# Order the columns correctly", "filtered_df", "=", "vcf_df", "[", "list", "(", "VCFColumns", ")", "]", "if", "set_index", ":", "filtered_df", ".", "set_index", "(", "keys", "=", "[", "VCFColumns", ".", "sequence_id", ",", "VCFColumns", ".", "position", "]", ",", "inplace", "=", "True", ")", "return", "filtered_df" ]
[ 53, 0 ]
[ 80, 19 ]
python
en
['en', 'error', 'th']
False
split_unquoted_newlines
(stmt)
Split a string on all unquoted newlines. Unlike str.splitlines(), this will ignore CR/LF/CR+LF if the requisite character is inside of a string.
Split a string on all unquoted newlines.
def split_unquoted_newlines(stmt): """Split a string on all unquoted newlines. Unlike str.splitlines(), this will ignore CR/LF/CR+LF if the requisite character is inside of a string.""" text = str(stmt) lines = SPLIT_REGEX.split(text) outputlines = [''] for line in lines: if not line: continue elif LINE_MATCH.match(line): outputlines.append('') else: outputlines[-1] += line return outputlines
[ "def", "split_unquoted_newlines", "(", "stmt", ")", ":", "text", "=", "str", "(", "stmt", ")", "lines", "=", "SPLIT_REGEX", ".", "split", "(", "text", ")", "outputlines", "=", "[", "''", "]", "for", "line", "in", "lines", ":", "if", "not", "line", ":", "continue", "elif", "LINE_MATCH", ".", "match", "(", "line", ")", ":", "outputlines", ".", "append", "(", "''", ")", "else", ":", "outputlines", "[", "-", "1", "]", "+=", "line", "return", "outputlines" ]
[ 35, 0 ]
[ 50, 22 ]
python
en
['en', 'en', 'en']
True
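SPLIT_REGEX and LINE_MATCH are defined elsewhere in the module, so the function above is not runnable on its own. A rough standalone approximation of the intended behaviour, written as a hand-rolled scanner rather than sqlparse's actual regexes and handling only '\n' for brevity: newlines inside quoted literals are kept, unquoted newlines split the text.

def split_unquoted_newlines_demo(text):
    # Newlines inside '...' or "..." literals are kept; unquoted ones split.
    lines, buf, quote = [], [], None
    for ch in text:
        if quote:
            buf.append(ch)
            if ch == quote:
                quote = None
        elif ch in ('"', "'"):
            quote = ch
            buf.append(ch)
        elif ch == '\n':
            lines.append(''.join(buf))
            buf = []
        else:
            buf.append(ch)
    lines.append(''.join(buf))
    return lines

print(split_unquoted_newlines_demo("select 'a\nb' as x\nfrom t"))
# ["select 'a\nb' as x", 'from t']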
remove_quotes
(val)
Helper that removes surrounding quotes from strings.
Helper that removes surrounding quotes from strings.
def remove_quotes(val): """Helper that removes surrounding quotes from strings.""" if val is None: return if val[0] in ('"', "'") and val[0] == val[-1]: val = val[1:-1] return val
[ "def", "remove_quotes", "(", "val", ")", ":", "if", "val", "is", "None", ":", "return", "if", "val", "[", "0", "]", "in", "(", "'\"'", ",", "\"'\"", ")", "and", "val", "[", "0", "]", "==", "val", "[", "-", "1", "]", ":", "val", "=", "val", "[", "1", ":", "-", "1", "]", "return", "val" ]
[ 53, 0 ]
[ 59, 14 ]
python
en
['en', 'en', 'en']
True
recurse
(*cls)
Function decorator to help with recursion :param cls: Classes to not recurse over :return: function
Function decorator to help with recursion
def recurse(*cls): """Function decorator to help with recursion :param cls: Classes to not recurse over :return: function """ def wrap(f): def wrapped_f(tlist): for sgroup in tlist.get_sublists(): if not isinstance(sgroup, cls): wrapped_f(sgroup) f(tlist) return wrapped_f return wrap
[ "def", "recurse", "(", "*", "cls", ")", ":", "def", "wrap", "(", "f", ")", ":", "def", "wrapped_f", "(", "tlist", ")", ":", "for", "sgroup", "in", "tlist", ".", "get_sublists", "(", ")", ":", "if", "not", "isinstance", "(", "sgroup", ",", "cls", ")", ":", "wrapped_f", "(", "sgroup", ")", "f", "(", "tlist", ")", "return", "wrapped_f", "return", "wrap" ]
[ 62, 0 ]
[ 77, 15 ]
python
en
['en', 'en', 'en']
True
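A toy illustration of the decorator, with a hypothetical Node class standing in for sqlparse's token groups and assuming the recurse() definition above is in scope: f is applied bottom-up to every sublist, except that nodes of the excluded classes are neither visited nor descended into.

class Node:
    def __init__(self, name, children=()):
        self.name, self.children = name, list(children)
    def get_sublists(self):
        return self.children

class Skipped(Node):
    pass

visited = []

@recurse(Skipped)
def visit(node):
    visited.append(node.name)

tree = Node('root', [Node('a'), Skipped('skip', [Node('hidden')]), Node('b')])
visit(tree)
print(visited)   # ['a', 'b', 'root'] -- the Skipped subtree is never touched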
imt
(token, i=None, m=None, t=None)
Helper function to simplify Instance, Match and TokenType comparisons :param token: :param i: Class or Tuple/List of Classes :param m: Tuple of TokenType & Value. Can be list of Tuple for multiple :param t: TokenType or Tuple/List of TokenTypes :return: bool
Helper function to simplify Instance, Match and TokenType comparisons :param token: :param i: Class or Tuple/List of Classes :param m: Tuple of TokenType & Value. Can be list of Tuple for multiple :param t: TokenType or Tuple/List of TokenTypes :return: bool
def imt(token, i=None, m=None, t=None): """Helper function to simplify comparisons Instance, Match and TokenType :param token: :param i: Class or Tuple/List of Classes :param m: Tuple of TokenType & Value. Can be list of Tuple for multiple :param t: TokenType or Tuple/List of TokenTypes :return: bool """ clss = i types = [t, ] if t and not isinstance(t, list) else t mpatterns = [m, ] if m and not isinstance(m, list) else m if token is None: return False elif clss and isinstance(token, clss): return True elif mpatterns and any(token.match(*pattern) for pattern in mpatterns): return True elif types and any(token.ttype in ttype for ttype in types): return True else: return False
[ "def", "imt", "(", "token", ",", "i", "=", "None", ",", "m", "=", "None", ",", "t", "=", "None", ")", ":", "clss", "=", "i", "types", "=", "[", "t", ",", "]", "if", "t", "and", "not", "isinstance", "(", "t", ",", "list", ")", "else", "t", "mpatterns", "=", "[", "m", ",", "]", "if", "m", "and", "not", "isinstance", "(", "m", ",", "list", ")", "else", "m", "if", "token", "is", "None", ":", "return", "False", "elif", "clss", "and", "isinstance", "(", "token", ",", "clss", ")", ":", "return", "True", "elif", "mpatterns", "and", "any", "(", "token", ".", "match", "(", "*", "pattern", ")", "for", "pattern", "in", "mpatterns", ")", ":", "return", "True", "elif", "types", "and", "any", "(", "token", ".", "ttype", "in", "ttype", "for", "ttype", "in", "types", ")", ":", "return", "True", "else", ":", "return", "False" ]
[ 80, 0 ]
[ 101, 20 ]
python
en
['en', 'en', 'en']
True
consume
(iterator, n)
Advance the iterator n-steps ahead. If n is none, consume entirely.
Advance the iterator n-steps ahead. If n is none, consume entirely.
def consume(iterator, n): """Advance the iterator n-steps ahead. If n is none, consume entirely.""" deque(itertools.islice(iterator, n), maxlen=0)
[ "def", "consume", "(", "iterator", ",", "n", ")", ":", "deque", "(", "itertools", ".", "islice", "(", "iterator", ",", "n", ")", ",", "maxlen", "=", "0", ")" ]
[ 104, 0 ]
[ 106, 50 ]
python
en
['en', 'en', 'en']
True
private_to_public
()
Reads a private key and outputs the corresponding public key.
Reads a private key and outputs the corresponding public key.
def private_to_public(): """Reads a private key and outputs the corresponding public key.""" # Parse the CLI options parser = OptionParser(usage='usage: %prog [options]', description='Reads a private key and outputs the ' 'corresponding public key. Both private and public keys use ' 'the format described in PKCS#1 v1.5') parser.add_option('-i', '--input', dest='infilename', type='string', help='Input filename. Reads from stdin if not specified') parser.add_option('-o', '--output', dest='outfilename', type='string', help='Output filename. Writes to stdout of not specified') parser.add_option('--inform', dest='inform', help='key format of input - default PEM', choices=('PEM', 'DER'), default='PEM') parser.add_option('--outform', dest='outform', help='key format of output - default PEM', choices=('PEM', 'DER'), default='PEM') (cli, cli_args) = parser.parse_args(sys.argv) # Read the input data if cli.infilename: print('Reading private key from %s in %s format' % (cli.infilename, cli.inform), file=sys.stderr) with open(cli.infilename, 'rb') as infile: in_data = infile.read() else: print('Reading private key from stdin in %s format' % cli.inform, file=sys.stderr) in_data = sys.stdin.read().encode('ascii') assert type(in_data) == bytes, type(in_data) # Take the public fields and create a public key priv_key = rsa.key.PrivateKey.load_pkcs1(in_data, cli.inform) pub_key = rsa.key.PublicKey(priv_key.n, priv_key.e) # Save to the output file out_data = pub_key.save_pkcs1(cli.outform) if cli.outfilename: print('Writing public key to %s in %s format' % (cli.outfilename, cli.outform), file=sys.stderr) with open(cli.outfilename, 'wb') as outfile: outfile.write(out_data) else: print('Writing public key to stdout in %s format' % cli.outform, file=sys.stderr) sys.stdout.write(out_data.decode('ascii'))
[ "def", "private_to_public", "(", ")", ":", "# Parse the CLI options", "parser", "=", "OptionParser", "(", "usage", "=", "'usage: %prog [options]'", ",", "description", "=", "'Reads a private key and outputs the '", "'corresponding public key. Both private and public keys use '", "'the format described in PKCS#1 v1.5'", ")", "parser", ".", "add_option", "(", "'-i'", ",", "'--input'", ",", "dest", "=", "'infilename'", ",", "type", "=", "'string'", ",", "help", "=", "'Input filename. Reads from stdin if not specified'", ")", "parser", ".", "add_option", "(", "'-o'", ",", "'--output'", ",", "dest", "=", "'outfilename'", ",", "type", "=", "'string'", ",", "help", "=", "'Output filename. Writes to stdout of not specified'", ")", "parser", ".", "add_option", "(", "'--inform'", ",", "dest", "=", "'inform'", ",", "help", "=", "'key format of input - default PEM'", ",", "choices", "=", "(", "'PEM'", ",", "'DER'", ")", ",", "default", "=", "'PEM'", ")", "parser", ".", "add_option", "(", "'--outform'", ",", "dest", "=", "'outform'", ",", "help", "=", "'key format of output - default PEM'", ",", "choices", "=", "(", "'PEM'", ",", "'DER'", ")", ",", "default", "=", "'PEM'", ")", "(", "cli", ",", "cli_args", ")", "=", "parser", ".", "parse_args", "(", "sys", ".", "argv", ")", "# Read the input data", "if", "cli", ".", "infilename", ":", "print", "(", "'Reading private key from %s in %s format'", "%", "(", "cli", ".", "infilename", ",", "cli", ".", "inform", ")", ",", "file", "=", "sys", ".", "stderr", ")", "with", "open", "(", "cli", ".", "infilename", ",", "'rb'", ")", "as", "infile", ":", "in_data", "=", "infile", ".", "read", "(", ")", "else", ":", "print", "(", "'Reading private key from stdin in %s format'", "%", "cli", ".", "inform", ",", "file", "=", "sys", ".", "stderr", ")", "in_data", "=", "sys", ".", "stdin", ".", "read", "(", ")", ".", "encode", "(", "'ascii'", ")", "assert", "type", "(", "in_data", ")", "==", "bytes", ",", "type", "(", "in_data", ")", "# Take the public fields and create a public key", "priv_key", "=", "rsa", ".", "key", ".", "PrivateKey", ".", "load_pkcs1", "(", "in_data", ",", "cli", ".", "inform", ")", "pub_key", "=", "rsa", ".", "key", ".", "PublicKey", "(", "priv_key", ".", "n", ",", "priv_key", ".", "e", ")", "# Save to the output file", "out_data", "=", "pub_key", ".", "save_pkcs1", "(", "cli", ".", "outform", ")", "if", "cli", ".", "outfilename", ":", "print", "(", "'Writing public key to %s in %s format'", "%", "(", "cli", ".", "outfilename", ",", "cli", ".", "outform", ")", ",", "file", "=", "sys", ".", "stderr", ")", "with", "open", "(", "cli", ".", "outfilename", ",", "'wb'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "out_data", ")", "else", ":", "print", "(", "'Writing public key to stdout in %s format'", "%", "cli", ".", "outform", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "stdout", ".", "write", "(", "out_data", ".", "decode", "(", "'ascii'", ")", ")" ]
[ 26, 0 ]
[ 78, 50 ]
python
en
['en', 'en', 'en']
True
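For a quick programmatic check of the same idea, here is a hedged sketch using the python-rsa API (rsa.newkeys and rsa.PublicKey are real calls; the key size and variable names are arbitrary): the public key is rebuilt from exactly the two fields, n and e, that private_to_public() copies. The command-line entry point for the function above is typically installed as pyrsa-priv2pub, though that name comes from the package metadata rather than this file.

import rsa

# A throwaway keypair; private_to_public() copies exactly these two fields (n, e).
pub, priv = rsa.newkeys(512)
rebuilt = rsa.PublicKey(priv.n, priv.e)

assert rebuilt == pub
print(rebuilt.save_pkcs1().decode('ascii').splitlines()[0])   # -----BEGIN RSA PUBLIC KEY-----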
main
()
Entry point for the GUI-version of Blockify.
Entry point for the GUI-version of Blockify.
def main(): "Entry point for the GUI-version of Blockify." # Edit this for less or more logging. Loglevel 0 is least verbose. blockify.init_logger(logpath=None, loglevel=2, quiet=False) ui = BlockifyUI() gtk.main()
[ "def", "main", "(", ")", ":", "# Edit this for less or more logging. Loglevel 0 is least verbose.", "blockify", ".", "init_logger", "(", "logpath", "=", "None", ",", "loglevel", "=", "2", ",", "quiet", "=", "False", ")", "ui", "=", "BlockifyUI", "(", ")", "gtk", ".", "main", "(", ")" ]
[ 426, 0 ]
[ 431, 14 ]
python
en
['en', 'en', 'en']
True
Notepad.create_keybinds
(self)
Register Ctrl+Q/W to quit and Ctrl+S to save the blocklist.
Register Ctrl+Q/W to quit and Ctrl+S to save the blocklist.
def create_keybinds(self): "Register Ctrl+Q/W to quit and Ctrl+S to save the blocklist." quit_group = gtk.AccelGroup() quit_group.connect_group(ord("q"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_LOCKED, self.destroy) quit_group.connect_group(ord("w"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_LOCKED, self.destroy) self.add_accel_group(quit_group) save_group = gtk.AccelGroup() save_group.connect_group(ord("s"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_LOCKED, self.save) self.add_accel_group(save_group)
[ "def", "create_keybinds", "(", "self", ")", ":", "quit_group", "=", "gtk", ".", "AccelGroup", "(", ")", "quit_group", ".", "connect_group", "(", "ord", "(", "\"q\"", ")", ",", "gtk", ".", "gdk", ".", "CONTROL_MASK", ",", "gtk", ".", "ACCEL_LOCKED", ",", "self", ".", "destroy", ")", "quit_group", ".", "connect_group", "(", "ord", "(", "\"w\"", ")", ",", "gtk", ".", "gdk", ".", "CONTROL_MASK", ",", "gtk", ".", "ACCEL_LOCKED", ",", "self", ".", "destroy", ")", "self", ".", "add_accel_group", "(", "quit_group", ")", "save_group", "=", "gtk", ".", "AccelGroup", "(", ")", "save_group", ".", "connect_group", "(", "ord", "(", "\"s\"", ")", ",", "gtk", ".", "gdk", ".", "CONTROL_MASK", ",", "gtk", ".", "ACCEL_LOCKED", ",", "self", ".", "save", ")", "self", ".", "add_accel_group", "(", "save_group", ")" ]
[ 75, 4 ]
[ 87, 40 ]
python
en
['en', 'en', 'en']
True
Notepad.destroy
(self, *args)
Overloading destroy to untoggle the Open List button.
Overloading destroy to untoggle the Open List button.
def destroy(self, *args): "Overloading destroy to untoggle the Open List button." super(Notepad, self).destroy() self.parentw.togglelist.set_active(False)
[ "def", "destroy", "(", "self", ",", "*", "args", ")", ":", "super", "(", "Notepad", ",", "self", ")", ".", "destroy", "(", ")", "self", ".", "parentw", ".", "togglelist", ".", "set_active", "(", "False", ")" ]
[ 89, 4 ]
[ 92, 49 ]
python
en
['en', 'en', 'en']
True
BlockifyUI.update
(self)
Main GUI loop, 250ms interval (self.update_interval).
Main GUI loop, 250ms interval (self.update_interval).
def update(self): "Main GUI loop, 250ms interval (self.update_interval)." # Call the main update function of blockify and assign return value # (True/False) depending on whether a song to be blocked was found. self.found = self.b.update() # Correct the automute state, if necessary. if not any([self.mute_toggled, self.automute_toggled, self.b.automute]): self.b.automute = True # Our main GUI workers here, updating labels, buttons and the likes. self.update_songinfo() self.update_labels() self.update_togglebuttons() # The glib.timeout loop will only break if we return False here. return True
[ "def", "update", "(", "self", ")", ":", "# Call the main update function of blockify and assign return value", "# (True/False) depending on whether a song to be blocked was found.", "self", ".", "found", "=", "self", ".", "b", ".", "update", "(", ")", "# Correct the automute state, if necessary.", "if", "not", "any", "(", "[", "self", ".", "mute_toggled", ",", "self", ".", "automute_toggled", ",", "self", ".", "b", ".", "automute", "]", ")", ":", "self", ".", "b", ".", "automute", "=", "True", "# Our main GUI workers here, updating labels, buttons and the likes.", "self", ".", "update_songinfo", "(", ")", "self", ".", "update_labels", "(", ")", "self", ".", "update_togglebuttons", "(", ")", "# The glib.timeout loop will only break if we return False here.", "return", "True" ]
[ 200, 4 ]
[ 216, 19 ]
python
en
['en', 'af', 'en']
True