docstring: string, lengths 52-499
function: string, lengths 67-35.2k
__index_level_0__: int64, values 52.6k-1.16M
Unzips a vector of structs into one column per struct field. Args: expr (WeldObject / Numpy.ndarray): vector of structs to unzip column_types (List<str>): Weld type of each struct field Returns: A WeldObject representing this computation
def unzip_columns(expr, column_types):
    weld_obj = WeldObject(encoder_, decoder_)
    column_appenders = []
    struct_fields = []
    result_fields = []
    for i, column_type in enumerate(column_types):
        column_appenders.append("appender[%s]" % column_type)
        struct_fields.append("merge(b.$%s, e.$%s)" % (i, i))
        result_fields.append("result(unzip_builder.$%s)" % i)
    appender_string = "{%s}" % ", ".join(column_appenders)
    struct_string = "{%s}" % ", ".join(struct_fields)
    result_string = "{%s}" % ", ".join(result_fields)
    expr_var = weld_obj.update(expr)
    if isinstance(expr, WeldObject):
        expr_var = expr.obj_id
        weld_obj.dependencies[expr_var] = expr
    # (Weld template string elided in this extract.)
    weld_obj.weld_code = weld_template % {"expr": expr_var,
                                          "appenders": appender_string,
                                          "struct_builder": struct_string,
                                          "result": result_string}
    return weld_obj
168,147
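As a quick illustration of the snippet-building step in unzip_columns, the fragment below (plain Python, no Weld required; the two-column input is hypothetical) reproduces the three generated strings:

column_types = ["i64", "f64"]
column_appenders = ["appender[%s]" % t for t in column_types]
struct_fields = ["merge(b.$%s, e.$%s)" % (i, i) for i in range(len(column_types))]
result_fields = ["result(unzip_builder.$%s)" % i for i in range(len(column_types))]

print("{%s}" % ", ".join(column_appenders))  # {appender[i64], appender[f64]}
print("{%s}" % ", ".join(struct_fields))     # {merge(b.$0, e.$0), merge(b.$1, e.$1)}
print("{%s}" % ", ".join(result_fields))     # {result(unzip_builder.$0), result(unzip_builder.$1)}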
Sorts the vector. If the field parameter is provided, the sort operates on a vector of structs, where the sort key is the given field of the struct. Args: expr (WeldObject) field (int) keytype (WeldType): type of the sort key; required when ascending is False ascending (bool)
def sort(expr, field=None, keytype=None, ascending=True):
    weld_obj = WeldObject(encoder_, decoder_)
    expr_var = weld_obj.update(expr)
    if isinstance(expr, WeldObject):
        expr_var = expr.obj_id
        weld_obj.dependencies[expr_var] = expr
    if field is not None:
        key_str = "x.$%s" % field
    else:
        key_str = "x"
    if not ascending:
        # The type is not necessarily f64, so negate the key in its own type.
        key_str = key_str + "* %s(-1)" % keytype
    # (Weld template string elided in this extract.)
    weld_obj.weld_code = weld_template % {"expr": expr_var, "key": key_str}
    return weld_obj
168,148
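The key-expression logic in sort is easy to check in isolation; this standalone sketch (a hypothetical helper, not part of the library) mirrors it:

def build_key(field=None, keytype=None, ascending=True):
    key_str = "x.$%s" % field if field is not None else "x"
    if not ascending:
        key_str += "* %s(-1)" % keytype  # negate the key in its own type
    return key_str

print(build_key())                                       # x
print(build_key(field=2, keytype="f64", ascending=False))  # x.$2* f64(-1)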
Slices the vector. Args: expr (WeldObject) start (Long) stop (Long)
def slice_vec(expr, start, stop):
    weld_obj = WeldObject(encoder_, decoder_)
    expr_var = weld_obj.update(expr)
    if isinstance(expr, WeldObject):
        expr_var = expr.obj_id
        weld_obj.dependencies[expr_var] = expr
    # (Weld template string elided in this extract.)
    weld_obj.weld_code = weld_template % {"expr": expr_var,
                                          "start": start,
                                          "stop": stop}
    return weld_obj
168,149
Zip together multiple columns. Args: columns (WeldObject / Numpy.ndarray): list of columns Returns: A WeldObject representing this computation
def zip_columns(columns):
    weld_obj = WeldObject(encoder_, decoder_)
    column_vars = []
    for column in columns:
        col_var = weld_obj.update(column)
        if isinstance(column, WeldObject):
            col_var = column.obj_id
            weld_obj.dependencies[col_var] = column
        column_vars.append(col_var)
    arrays = ", ".join(column_vars)
    # (Weld template string elided in this extract.)
    weld_obj.weld_code = weld_template % {
        "array": arrays,
    }
    return weld_obj
168,150
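The registration pattern shared by these builders (update the WeldObject with each input, then swap in obj_id plus a dependency entry when the input is itself lazy) can be sketched with a stand-in class; FakeWeldObject below is invented purely for illustration:

class FakeWeldObject(object):
    """Stand-in for WeldObject, for illustration only."""
    def __init__(self):
        self.dependencies = {}
        self._counter = 0
    def update(self, value):
        # A raw (non-lazy) input gets bound to a fresh input name.
        self._counter += 1
        return "_inp%d" % self._counter

weld_obj = FakeWeldObject()
columns = [[1, 2, 3], [4.0, 5.0, 6.0]]      # raw ndarray stand-ins
column_vars = [weld_obj.update(col) for col in columns]
print(", ".join(column_vars))               # _inp1, _inp2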
Performs passed-in comparison op between every element in the passed-in array and other, and returns an array of booleans. Args: array (WeldObject / Numpy.ndarray): Input array other (WeldObject / str / scalar): Value to compare each element against op (str): Op string used for element-wise comparison (== >= <= !=) ty_str (str): Weld type string of each element in the input array Returns: A WeldObject representing this computation
def compare(array, other, op, ty_str):
    weld_obj = WeldObject(encoder_, decoder_)
    array_var = weld_obj.update(array)
    if isinstance(array, WeldObject):
        array_var = array.obj_id
        weld_obj.dependencies[array_var] = array

    # Strings need to be encoded into vec[char] array.
    # Constants can be added directly to NVL snippet.
    if isinstance(other, str) or isinstance(other, WeldObject):
        other_var = weld_obj.update(other)
        if isinstance(other, WeldObject):
            other_var = other.obj_id
            weld_obj.dependencies[other_var] = other
    else:
        other_var = "%s(%s)" % (ty_str, str(other))

    # (Weld template string elided in this extract.)
    weld_obj.weld_code = weld_template % {"array": array_var,
                                          "other": other_var,
                                          "op": op,
                                          "ty": ty_str}
    return weld_obj
168,151
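For a non-string constant, compare splices the literal directly into the snippet by wrapping it in its Weld type; a minimal sketch of that formatting step:

ty_str, other = "i32", 5
other_var = "%s(%s)" % (ty_str, str(other))
print(other_var)   # i32(5)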
Returns a new array-of-arrays with each array truncated, starting at index `start` for `size` characters. Args: array (WeldObject / Numpy.ndarray): Input array start (int): starting index size (int): length to truncate at ty (WeldType): Type of each element in the input array Returns: A WeldObject representing this computation
def slice(array, start, size, ty):
    weld_obj = WeldObject(encoder_, decoder_)
    array_var = weld_obj.update(array)
    if isinstance(array, WeldObject):
        array_var = array.obj_id
        weld_obj.dependencies[array_var] = array
    # (Weld template string elided in this extract.)
    weld_obj.weld_code = weld_template % {"array": array_var,
                                          "start": start,
                                          "ty": ty,
                                          "size": size}
    return weld_obj
168,152
Checks if given string is contained in each string in the array. Output is a vec of booleans. Args: array (WeldObject / Numpy.ndarray): Input array ty (WeldType): Type of each element in the input array string (str): substring to search each element for Returns: A WeldObject representing this computation
def contains(array, ty, string):
    weld_obj = WeldObject(encoder_, decoder_)
    string_obj = weld_obj.update(string)
    if isinstance(string, WeldObject):
        string_obj = string.obj_id
        weld_obj.dependencies[string_obj] = string
    array_var = weld_obj.update(array)
    if isinstance(array, WeldObject):
        array_var = array.obj_id
        weld_obj.dependencies[array_var] = array
    (start, end) = 0, len(string)
    # Some odd bug where iterating on str and slicing str results
    # in a segfault
    # (Weld template string elided in this extract.)
    weld_obj.weld_code = weld_template % {"array": array_var,
                                          "ty": ty,
                                          "start": start,
                                          "end": end,
                                          "cmpstr": string_obj}
    return weld_obj
168,153
Groups rows by the given grouping columns and computes the size of each group. Args: columns (List<WeldObject>): List of columns as WeldObjects (unused here) column_tys (List<str>): List of each column data ty (unused here) grouping_columns (List<WeldObject>): Columns to group by grouping_column_tys (List<str>): Types of the grouping columns Returns: A WeldObject representing this computation
def groupby_size(columns, column_tys, grouping_columns, grouping_column_tys):
    weld_obj = WeldObject(encoder_, decoder_)
    if len(grouping_columns) == 1 and len(grouping_column_tys) == 1:
        grouping_column_var = weld_obj.update(grouping_columns[0])
        if isinstance(grouping_columns[0], WeldObject):
            grouping_column_var = grouping_columns[0].weld_code
        grouping_column_ty_str = "%s" % grouping_column_tys[0]
    else:
        grouping_column_vars = []
        for column in grouping_columns:
            column_var = weld_obj.update(column)
            if isinstance(column, WeldObject):
                column_var = column.weld_code
            grouping_column_vars.append(column_var)
        grouping_column_var = ", ".join(grouping_column_vars)
        grouping_column_var = "zip(%s)" % grouping_column_var
        grouping_column_tys = [str(ty) for ty in grouping_column_tys]
        grouping_column_ty_str = ", ".join(grouping_column_tys)
        grouping_column_ty_str = "{%s}" % grouping_column_ty_str
    # (Weld template string elided in this extract.)
    weld_obj.weld_code = weld_template % {"grouping_column": grouping_column_var,
                                          "gty": grouping_column_ty_str}
    return weld_obj
168,161
Groups the given value columns by the corresponding grouping columns, and sorts the values within each group. Args: columns (List<WeldObject>): List of value columns as WeldObjects column_tys (List<str>): List of each value column data ty grouping_columns (List<WeldObject>): Columns to group by grouping_column_tys (List<str>): Types of the grouping columns key_index (int): index of the value field to sort each group by (None sorts by the whole value) ascending (bool) Returns: A WeldObject representing this computation
def groupby_sort(columns, column_tys, grouping_columns,
                 grouping_column_tys, key_index, ascending):
    weld_obj = WeldObject(encoder_, decoder_)
    if len(grouping_columns) == 1 and len(grouping_column_tys) == 1:
        grouping_column_var = weld_obj.update(grouping_columns[0])
        if isinstance(grouping_columns[0], WeldObject):
            grouping_column_var = grouping_columns[0].weld_code
        grouping_column_ty_str = "%s" % grouping_column_tys[0]
    else:
        grouping_column_vars = []
        for column in grouping_columns:
            column_var = weld_obj.update(column)
            if isinstance(column, WeldObject):
                column_var = column.obj_id
                weld_obj.dependencies[column_var] = column
            grouping_column_vars.append(column_var)
        grouping_column_var = ", ".join(grouping_column_vars)
        grouping_column_tys = [str(ty) for ty in grouping_column_tys]
        grouping_column_ty_str = ", ".join(grouping_column_tys)
        grouping_column_ty_str = "{%s}" % grouping_column_ty_str

    columns_var_list = []
    for column in columns:
        column_var = weld_obj.update(column)
        if isinstance(column, WeldObject):
            column_var = column.obj_id
            weld_obj.dependencies[column_var] = column
        columns_var_list.append(column_var)

    if len(columns_var_list) == 1 and len(grouping_columns) == 1:
        columns_var = columns_var_list[0]
        tys_str = column_tys[0]
        result_str = "merge(b, e)"
    elif len(columns_var_list) == 1 and len(grouping_columns) > 1:
        columns_var = columns_var_list[0]
        tys_str = column_tys[0]
        key_str_list = []
        for i in xrange(0, len(grouping_columns)):
            key_str_list.append("e.$%d" % i)
        key_str = "{%s}" % ", ".join(key_str_list)
        value_str = "e.$" + str(len(grouping_columns))
        result_str_list = [key_str, value_str]
        result_str = "merge(b, {%s})" % ", ".join(result_str_list)
    elif len(columns_var_list) > 1 and len(grouping_columns) == 1:
        columns_var = "%s" % ", ".join(columns_var_list)
        column_tys = [str(ty) for ty in column_tys]
        tys_str = "{%s}" % ", ".join(column_tys)
        key_str = "e.$0"
        value_str_list = []
        for i in xrange(1, len(columns) + 1):
            value_str_list.append("e.$%d" % i)
        value_str = "{%s}" % ", ".join(value_str_list)
        result_str_list = [key_str, value_str]
        result_str = "merge(b, {%s})" % ", ".join(result_str_list)
    else:
        columns_var = "%s" % ", ".join(columns_var_list)
        column_tys = [str(ty) for ty in column_tys]
        tys_str = "{%s}" % ", ".join(column_tys)
        key_str_list = []
        key_size = len(grouping_columns)
        value_size = len(columns)
        for i in xrange(0, key_size):
            key_str_list.append("e.$%d" % i)
        key_str = "{%s}" % ", ".join(key_str_list)
        value_str_list = []
        for i in xrange(key_size, key_size + value_size):
            value_str_list.append("e.$%d" % i)
        value_str = "{%s}" % ", ".join(value_str_list)
        result_str_list = [key_str, value_str]
        result_str = "merge(b, {%s})" % ", ".join(result_str_list)

    if key_index is None:
        key_str_x = "x"
        key_str_y = "y"
    else:
        key_str_x = "x.$%d" % key_index
        key_str_y = "y.$%d" % key_index
    if not ascending:
        # Negate both comparator keys to obtain a descending sort.
        key_str_x = key_str_x + "* %s(-1)" % column_tys[key_index]
        key_str_y = key_str_y + "* %s(-1)" % column_tys[key_index]

    # (Weld template string elided in this extract.)
    weld_obj.weld_code = weld_template % {"grouping_column": grouping_column_var,
                                          "columns": columns_var,
                                          "result": result_str,
                                          "ty": tys_str,
                                          "gty": grouping_column_ty_str,
                                          "key_str_x": key_str_x,
                                          "key_str_y": key_str_y}
    return weld_obj
168,162
Flattens a grouped result back into flat columns, pairing each group key with each of the group's values. Args: expr (WeldObject): grouped result to flatten column_tys (List<str>): List of each value column data ty grouping_column_tys (List<str>): Types of the grouping columns Returns: A WeldObject representing this computation
def flatten_group(expr, column_tys, grouping_column_tys):
    weld_obj = WeldObject(encoder_, decoder_)
    group_var = weld_obj.update(expr)
    num_group_cols = len(grouping_column_tys)
    if num_group_cols == 1:
        grouping_column_ty_str = "%s" % grouping_column_tys[0]
        grouping_column_key_str = "a.$0"
    else:
        grouping_column_tys = [str(ty) for ty in grouping_column_tys]
        grouping_column_ty_str = ", ".join(grouping_column_tys)
        grouping_column_ty_str = "{%s}" % grouping_column_ty_str
        grouping_column_keys = ["a.$0.$%s" % i for i in range(0, num_group_cols)]
        grouping_column_key_str = "{%s}" % ", ".join(grouping_column_keys)

    if len(column_tys) == 1:
        tys_str = "%s" % column_tys[0]
        column_values_str = "b.$1"
    else:
        column_tys = [str(ty) for ty in column_tys]
        tys_str = "{%s}" % ", ".join(column_tys)
        column_values = ["b.$%s" % i for i in range(0, len(column_tys))]
        column_values_str = "{%s}" % ", ".join(column_values)

    # TODO: The output in pandas keeps the keys sorted (even if keys are
    # structs). We need to allow key functions in sort-by clauses to be
    # able to compare structs.
    # sort(%(group_vec)s, |x:{%(gty)s, vec[%(ty)s]}| x.$0),
    # (Weld template string elided in this extract.)
    weld_obj.weld_code = weld_template % {"group_vec": expr.weld_code,
                                          "gkeys": grouping_column_key_str,
                                          "gvals": column_values_str,
                                          "ty": tys_str,
                                          "gty": grouping_column_ty_str}
    return weld_obj
168,163
Slices each group in a grouped result, keeping `size` elements starting at `start` within every group. Args: expr (WeldObject): grouped result type (str): Weld type of the grouped elements start (int) size (int) Returns: A WeldObject representing this computation
def grouped_slice(expr, type, start, size):
    weld_obj = WeldObject(encoder_, decoder_)
    weld_obj.update(expr)
    # (Weld template string elided in this extract.)
    weld_obj.weld_code = weld_template % {"vec": expr.weld_code,
                                          "type": type,
                                          "start": str(start),
                                          "size": str(size)}
    return weld_obj
168,164
Get column corresponding to passed-in index from ptr returned by groupBySum. Args: columns (List<WeldObject>): List of columns as WeldObjects column_tys (List<str>): List of each column data ty index (int): index of selected column Returns: A WeldObject representing this computation
def get_column(columns, column_tys, index):
    weld_obj = WeldObject(encoder_, decoder_)
    columns_var = weld_obj.update(columns, tys=WeldVec(column_tys),
                                  override=False)
    if isinstance(columns, WeldObject):
        columns_var = columns.obj_id
        weld_obj.dependencies[columns_var] = columns
    # (Weld template string elided in this extract.)
    weld_obj.weld_code = weld_template % {"columns": columns_var,
                                          "ty": column_tys,
                                          "index": index}
    return weld_obj
168,165
Computes the dot product between a matrix and a vector. TODO: Make this more generic Args: matrix (LazyOpResult / np.ndarray): matrix operand vector (LazyOpResult / np.ndarray): vector operand Returns: A NumpyArrayWeld representing this computation
def dot(matrix, vector):
    matrix_weld_type = None
    vector_weld_type = None

    if isinstance(matrix, LazyOpResult):
        matrix_weld_type = matrix.weld_type
        matrix = matrix.expr
    elif isinstance(matrix, np.ndarray):
        matrix_weld_type = numpy_weld_impl.numpy_to_weld_type_mapping[
            str(matrix.dtype)]

    if isinstance(vector, LazyOpResult):
        vector_weld_type = vector.weld_type
        vector = vector.expr
    elif isinstance(vector, np.ndarray):
        vector_weld_type = numpy_weld_impl.numpy_to_weld_type_mapping[
            str(vector.dtype)]

    return NumpyArrayWeld(
        numpy_weld_impl.dot(
            matrix,
            vector,
            matrix_weld_type,
            vector_weld_type),
        WeldDouble())
168,170
Computes a per-element exponent of the passed-in vector. Args: vector (LazyOpResult / np.ndarray): vector to exponentiate Returns: A NumpyArrayWeld representing this computation
def exp(vector):
    weld_type = None
    if isinstance(vector, LazyOpResult):
        weld_type = vector.weld_type
        vector = vector.expr
    elif isinstance(vector, np.ndarray):
        weld_type = numpy_weld_impl.numpy_to_weld_type_mapping[
            str(vector.dtype)]
    return NumpyArrayWeld(numpy_weld_impl.exp(vector, weld_type), WeldDouble())
168,171
Element-wise division of this array by other. Args: other (LazyOpResult / scalar): divisor Returns: NumpyArrayWeld: lazy result of the division
def __div__(self, other):
    if isinstance(other, LazyOpResult):
        other = other.expr
    return NumpyArrayWeld(
        numpy_weld_impl.div(
            self.expr,
            other,
            self.weld_type
        ),
        self.weld_type
    )
168,172
Infers the Weld type corresponding to a Python object. Args: obj (np.ndarray / str): object whose Weld type to infer Returns: WeldType: the inferred type Raises: Exception: if the object cannot be mapped to a Weld type
def py_to_weld_type(self, obj):
    if isinstance(obj, np.ndarray):
        dtype = str(obj.dtype)
        if dtype == 'int16':
            base = WeldInt16()
        elif dtype == 'int32':
            base = WeldInt()
        elif dtype == 'int64':
            base = WeldLong()
        elif dtype == 'float32':
            base = WeldFloat()
        elif dtype == 'float64':
            base = WeldDouble()
        elif dtype == 'bool':
            base = WeldBit()
        else:
            base = WeldVec(WeldChar())  # TODO: Fix this
        for i in xrange(obj.ndim):
            base = WeldVec(base)
    elif isinstance(obj, str):
        base = WeldVec(WeldChar())
    else:
        raise Exception("Invalid object type: unable to infer NVL type")
    return base
168,176
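A pure-Python mimic of the ndim wrapping above, working on type-name strings only (the real method wraps WeldVec instances); the scalar names assume Weld's usual i16/i32/i64/f32/f64 spellings:

def weld_type_name(dtype, ndim):
    names = {'int16': 'i16', 'int32': 'i32', 'int64': 'i64',
             'float32': 'f32', 'float64': 'f64', 'bool': 'bool'}
    base = names[dtype]
    for _ in range(ndim):
        base = "vec[%s]" % base    # one vec[...] layer per array dimension
    return base

print(weld_type_name('float64', 2))   # vec[vec[f64]]
print(weld_type_name('int32', 1))     # vec[i32]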
Converts Python object to Weld object. Args: obj: Python object that needs to be converted to Weld format Returns: Weld formatted object
def encode(self, obj):
    if isinstance(obj, np.ndarray):
        if obj.ndim == 1 and obj.dtype == 'int16':
            numpy_to_weld = self.utils.numpy_to_weld_int16_arr
        elif obj.ndim == 1 and obj.dtype == 'int32':
            numpy_to_weld = self.utils.numpy_to_weld_int_arr
        elif obj.ndim == 1 and obj.dtype == 'int64':
            numpy_to_weld = self.utils.numpy_to_weld_long_arr
        elif obj.ndim == 1 and obj.dtype == 'float32':
            numpy_to_weld = self.utils.numpy_to_weld_float_arr
        elif obj.ndim == 1 and obj.dtype == 'float64':
            numpy_to_weld = self.utils.numpy_to_weld_double_arr
        elif obj.ndim == 2 and obj.dtype == 'int16':
            numpy_to_weld = self.utils.numpy_to_weld_int16_arr_arr
        elif obj.ndim == 2 and obj.dtype == 'int32':
            numpy_to_weld = self.utils.numpy_to_weld_int_arr_arr
        elif obj.ndim == 2 and obj.dtype == 'int64':
            numpy_to_weld = self.utils.numpy_to_weld_long_arr_arr
        elif obj.ndim == 2 and obj.dtype == 'float32':
            numpy_to_weld = self.utils.numpy_to_weld_float_arr_arr
        elif obj.ndim == 2 and obj.dtype == 'float64':
            numpy_to_weld = self.utils.numpy_to_weld_double_arr_arr
        elif obj.ndim == 2 and obj.dtype == 'bool':
            numpy_to_weld = self.utils.numpy_to_weld_bool_arr_arr
        elif obj.ndim == 1 and obj.dtype == 'bool':
            numpy_to_weld = self.utils.numpy_to_weld_bool_arr
        else:
            numpy_to_weld = self.utils.numpy_to_weld_char_arr_arr
    elif isinstance(obj, str):
        numpy_to_weld = self.utils.numpy_to_weld_char_arr
    else:
        raise Exception("Unable to encode; invalid object type")

    numpy_to_weld.restype = self.py_to_weld_type(obj).ctype_class
    numpy_to_weld.argtypes = [py_object]
    weld_vec = numpy_to_weld(obj)
    return weld_vec
168,177
Converts Weld object to Python object. Args: obj: Result of Weld computation that needs to be decoded restype: Type of Weld computation result raw_ptr: Boolean indicating whether obj needs to be extracted from WeldValue or not Returns: Python object representing result of the Weld computation
def decode(self, obj, restype, raw_ptr=False):
    if raw_ptr:
        data = obj
    else:
        data = cweld.WeldValue(obj).data()
    result = ctypes.cast(data, ctypes.POINTER(restype.ctype_class)).contents

    if restype == WeldInt16():
        data = cweld.WeldValue(obj).data()
        result = ctypes.cast(data, ctypes.POINTER(c_int16)).contents.value
        return result
    elif restype == WeldInt():
        data = cweld.WeldValue(obj).data()
        result = ctypes.cast(data, ctypes.POINTER(c_int)).contents.value
        return result
    elif restype == WeldLong():
        data = cweld.WeldValue(obj).data()
        result = ctypes.cast(data, ctypes.POINTER(c_long)).contents.value
        return result
    elif restype == WeldFloat():
        data = cweld.WeldValue(obj).data()
        result = ctypes.cast(data, ctypes.POINTER(c_float)).contents.value
        return np.float32(result)
    elif restype == WeldDouble():
        data = cweld.WeldValue(obj).data()
        result = ctypes.cast(data, ctypes.POINTER(c_double)).contents.value
        return float(result)
    elif restype == WeldBit():
        data = cweld.WeldValue(obj).data()
        result = ctypes.cast(data, ctypes.POINTER(c_bool)).contents.value
        return bool(result)

    # Obj is a WeldVec(WeldInt()).ctype_class, which is a subclass of
    # ctypes._structure
    if restype == WeldVec(WeldBit()):
        weld_to_numpy = self.utils.weld_to_numpy_bool_arr
    elif restype == WeldVec(WeldInt16()):
        weld_to_numpy = self.utils.weld_to_numpy_int16_arr
    elif restype == WeldVec(WeldInt()):
        weld_to_numpy = self.utils.weld_to_numpy_int_arr
    elif restype == WeldVec(WeldLong()):
        weld_to_numpy = self.utils.weld_to_numpy_long_arr
    elif restype == WeldVec(WeldFloat()):
        weld_to_numpy = self.utils.weld_to_numpy_float_arr
    elif restype == WeldVec(WeldDouble()):
        weld_to_numpy = self.utils.weld_to_numpy_double_arr
    elif restype == WeldVec(WeldVec(WeldChar())):
        weld_to_numpy = self.utils.weld_to_numpy_char_arr_arr
    elif restype == WeldVec(WeldVec(WeldInt16())):
        weld_to_numpy = self.utils.weld_to_numpy_int16_arr_arr
    elif restype == WeldVec(WeldVec(WeldInt())):
        weld_to_numpy = self.utils.weld_to_numpy_int_arr_arr
    elif restype == WeldVec(WeldVec(WeldLong())):
        weld_to_numpy = self.utils.weld_to_numpy_long_arr_arr
    elif restype == WeldVec(WeldVec(WeldFloat())):
        weld_to_numpy = self.utils.weld_to_numpy_float_arr_arr
    elif restype == WeldVec(WeldVec(WeldDouble())):
        weld_to_numpy = self.utils.weld_to_numpy_double_arr_arr
    elif restype == WeldVec(WeldVec(WeldBit())):
        weld_to_numpy = self.utils.weld_to_numpy_bool_arr_arr
    elif isinstance(restype, WeldStruct):
        ret_vecs = []
        # Iterate through all fields in the struct, and recursively call
        # decode.
        for field_type in restype.field_types:
            ret_vec = self.decode(data, field_type, raw_ptr=True)
            data += sizeof(field_type.ctype_class())
            ret_vecs.append(ret_vec)
        return tuple(ret_vecs)
    else:
        raise Exception("Unable to decode; invalid return type")

    weld_to_numpy.restype = py_object
    weld_to_numpy.argtypes = [restype.ctype_class]
    ret_vec = weld_to_numpy(result)
    return ret_vec
168,178
Updates the global list of line error-suppressions. Parses any NOLINT comments on the current line, updating the global error_suppressions store. Reports an error if the NOLINT comment was malformed. Args: filename: str, the name of the input file. raw_line: str, the line of input text, with comments. linenum: int, the number of the current line. error: function, an error handler.
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
  if matched:
    if matched.group(1):
      suppressed_line = linenum + 1
    else:
      suppressed_line = linenum
    category = matched.group(2)
    if category in (None, '(*)'):  # => "suppress all"
      _error_suppressions.setdefault(None, set()).add(suppressed_line)
    else:
      if category.startswith('(') and category.endswith(')'):
        category = category[1:-1]
        if category in _ERROR_CATEGORIES:
          _error_suppressions.setdefault(category, set()).add(suppressed_line)
        elif category not in _LEGACY_ERROR_CATEGORIES:
          error(filename, linenum, 'readability/nolint', 5,
                'Unknown NOLINT error category: %s' % category)
170,130
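The NOLINT regex above is self-contained and easy to exercise directly; the sample lines below are hypothetical:

import re

pattern = r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?'
m = re.search(pattern, 'int x;  // NOLINT(runtime/int)')
print(m.group(1), m.group(2))   # None (runtime/int)
m = re.search(pattern, '// NOLINTNEXTLINE(*)')
print(m.group(1), m.group(2))   # NEXTLINE (*)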
Updates the list of global error suppressions. Parses any lint directives in the file that have global effect. Args: lines: An array of strings, each representing a line of the file, with the last element being empty if the file is terminated with a newline.
def ProcessGlobalSuppresions(lines):
  for line in lines:
    if _SEARCH_C_FILE.search(line):
      for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
        _global_error_suppressions[category] = True
    if _SEARCH_KERNEL_FILE.search(line):
      for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
        _global_error_suppressions[category] = True
170,131
Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment or global suppression.
def IsErrorSuppressedByNolint(category, linenum):
  return (_global_error_suppressions.get(category, False) or
          linenum in _error_suppressions.get(category, set()) or
          linenum in _error_suppressions.get(None, set()))
170,132
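The lookup is just two dict probes plus the category-less "suppress all" bucket; a standalone sketch with made-up suppression maps:

_global_error_suppressions = {'readability/casting': True}
_error_suppressions = {'runtime/int': {12}, None: {40}}

def is_suppressed(category, linenum):
    return (_global_error_suppressions.get(category, False)
            or linenum in _error_suppressions.get(category, set())
            or linenum in _error_suppressions.get(None, set()))

print(is_suppressed('runtime/int', 12))        # True  (line-scoped NOLINT)
print(is_suppressed('whitespace/parens', 40))  # True  (NOLINT(*) on line 40)
print(is_suppressed('runtime/int', 13))        # False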
Replaces instances of pattern in a string with a replacement. The compiled regex is kept in a cache shared by Match and Search. Args: pattern: regex pattern rep: replacement text s: search string Returns: string with replacements made (or original string if no replacements)
def ReplaceAll(pattern, rep, s):
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].sub(rep, s)
170,134
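A self-contained version of the shared compile cache (cpplint uses sre_compile; re.compile would behave the same):

import sre_compile

_regexp_compile_cache = {}

def replace_all(pattern, rep, s):
    if pattern not in _regexp_compile_cache:
        _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
    return _regexp_compile_cache[pattern].sub(rep, s)

print(replace_all(r'\s+', ' ', 'a   b\t c'))   # a b c
print(len(_regexp_compile_cache))              # 1 -- compiled once, reused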
Determines whether a line ends inside a string constant, i.e. whether the next character appended to 'line' would be part of a string. This function does not consider single-line nor multi-line comments. Args: line: a partial line of code starting from column 0. Returns: True, if the next character appended to 'line' is inside a string constant.
def IsCppString(line):
  line = line.replace(r'\\', 'XX')  # after this, \\" does not match to \"
  return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
170,137
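The parity trick is worth seeing on concrete inputs; a standalone copy of the function with sample lines:

def is_cpp_string(line):
    line = line.replace(r'\\', 'XX')  # so \\" no longer looks like \"
    return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1

print(is_cpp_string('std::string s = "abc'))    # True  -- one unclosed quote
print(is_cpp_string('std::string s = "abc";'))  # False -- quotes balanced
print(is_cpp_string(r'printf("\"")'))           # False -- escaped quote discounted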
Removes C++11 raw strings from lines. Before: static const char kData[] = R"( multi-line string )"; After: static const char kData[] = "" (replaced by blank line) ""; Args: raw_lines: list of raw lines. Returns: list of lines with C++11 raw strings replaced by empty strings.
def CleanseRawStrings(raw_lines):
  delimiter = None
  lines_without_raw_strings = []
  for line in raw_lines:
    if delimiter:
      # Inside a raw string, look for the end
      end = line.find(delimiter)
      if end >= 0:
        # Found the end of the string, match leading space for this
        # line and resume copying the original lines, and also insert
        # a "" on the last line.
        leading_space = Match(r'^(\s*)\S', line)
        line = leading_space.group(1) + '""' + line[end + len(delimiter):]
        delimiter = None
      else:
        # Haven't found the end yet, append a blank line.
        line = '""'

    # Look for beginning of a raw string, and replace them with
    # empty strings.  This is done in a loop to handle multiple raw
    # strings on the same line.
    while delimiter is None:
      # Look for beginning of a raw string.
      # See 2.14.15 [lex.string] for syntax.
      #
      # Once we have matched a raw string, we check the prefix of the
      # line to make sure that the line is not part of a single line
      # comment.  It's done this way because we remove raw strings
      # before removing comments as opposed to removing comments
      # before removing raw strings.  This is because there are some
      # cpplint checks that requires the comments to be preserved, but
      # we don't want to check comments that are inside raw strings.
      matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
      if (matched and
          not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
                    matched.group(1))):
        delimiter = ')' + matched.group(2) + '"'

        end = matched.group(3).find(delimiter)
        if end >= 0:
          # Raw string ended on same line
          line = (matched.group(1) + '""' +
                  matched.group(3)[end + len(delimiter):])
          delimiter = None
        else:
          # Start of a multi-line raw string
          line = matched.group(1) + '""'
      else:
        break

    lines_without_raw_strings.append(line)

  # TODO(unknown): if delimiter is not None here, we might want to
  # emit a warning for unterminated string.
  return lines_without_raw_strings
170,138
Removes //-comments and single-line C-style /* */ comments. Args: line: A line of C++ source. Returns: The line with single-line comments removed.
def CleanseComments(line):
  commentpos = line.find('//')
  if commentpos != -1 and not IsCppString(line[:commentpos]):
    line = line[:commentpos].rstrip()
  # get rid of /* ... */
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
170,143
Find the position just after the end of current parenthesized expression. Args: line: a CleansedLines line. startpos: start searching at this position. stack: nesting stack at startpos. Returns: On finding matching end: (index just after matching end, None) On finding an unclosed expression: (-1, None) Otherwise: (-1, new stack at end of this line)
def FindEndOfExpressionInLine(line, startpos, stack):
  for i in xrange(startpos, len(line)):
    char = line[i]
    if char in '([{':
      # Found start of parenthesized expression, push to expression stack
      stack.append(char)
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        if stack and stack[-1] == '<':
          stack.pop()
          if not stack:
            return (-1, None)
      elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
        # operator<, don't add to stack
        continue
      else:
        # Tentative start of template argument list
        stack.append('<')
    elif char in ')]}':
      # Found end of parenthesized expression.
      #
      # If we are currently expecting a matching '>', the pending '<'
      # must have been an operator.  Remove them from expression stack.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((stack[-1] == '(' and char == ')') or
          (stack[-1] == '[' and char == ']') or
          (stack[-1] == '{' and char == '}')):
        stack.pop()
        if not stack:
          return (i + 1, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == '>':
      # Found potential end of template argument list.
      # Ignore "->" and operator functions
      if (i > 0 and
          (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
        continue

      # Pop the stack if there is a matching '<'.  Otherwise, ignore
      # this '>' since it must be an operator.
      if stack:
        if stack[-1] == '<':
          stack.pop()
          if not stack:
            return (i + 1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '>', the matching '<' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)

  # Did not find end of expression or unbalanced parentheses on this line
  return (-1, stack)
170,144
Find position at the matching start of current expression. This is almost the reverse of FindEndOfExpressionInLine, but note that the input position and returned position differs by 1. Args: line: a CleansedLines line. endpos: start searching at this position. stack: nesting stack at endpos. Returns: On finding matching start: (index at matching start, None) On finding an unclosed expression: (-1, None) Otherwise: (-1, new stack at beginning of this line)
def FindStartOfExpressionInLine(line, endpos, stack):
  i = endpos
  while i >= 0:
    char = line[i]
    if char in ')]}':
      # Found end of expression, push to expression stack
      stack.append(char)
    elif char == '>':
      # Found potential end of template argument list.
      #
      # Ignore it if it's a "->" or ">=" or "operator>"
      if (i > 0 and
          (line[i - 1] == '-' or
           Match(r'\s>=\s', line[i - 1:]) or
           Search(r'\boperator\s*$', line[0:i]))):
        i -= 1
      else:
        stack.append('>')
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        i -= 1
      else:
        # If there is a matching '>', we can pop the expression stack.
        # Otherwise, ignore this '<' since it must be an operator.
        if stack and stack[-1] == '>':
          stack.pop()
          if not stack:
            return (i, None)
    elif char in '([{':
      # Found start of expression.
      #
      # If there are any unmatched '>' on the stack, they must be
      # operators.  Remove those.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((char == '(' and stack[-1] == ')') or
          (char == '[' and stack[-1] == ']') or
          (char == '{' and stack[-1] == '}')):
        stack.pop()
        if not stack:
          return (i, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '<', the matching '>' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)

    i -= 1

  return (-1, stack)
170,146
Return the number of leading spaces in line. Args: line: A string to check. Returns: An integer count of leading spaces, possibly zero.
def GetIndentLevel(line):
  indent = Match(r'^( *)\S', line)
  if indent:
    return len(indent.group(1))
  else:
    return 0
170,149
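Since cpplint's Match is an anchored re.match, the same logic runs standalone; note that an all-blank line has no \S and so reports zero:

import re

def get_indent_level(line):
    indent = re.match(r'^( *)\S', line)
    return len(indent.group(1)) if indent else 0

print(get_indent_level('    int x;'))  # 4
print(get_indent_level('int x;'))      # 0
print(get_indent_level('   '))         # 0 -- blank line, no \S to anchor on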
Returns the CPP variable that should be used as a header guard. Args: filename: The name of a C++ header file. Returns: The CPP variable that should be used as a header guard in the named file.
def GetHeaderGuardCPPVariable(filename):
  # Restores original filename in case that cpplint is invoked from Emacs's
  # flymake.
  filename = re.sub(r'_flymake\.h$', '.h', filename)
  filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
  # Replace 'c++' with 'cpp'.
  filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')

  fileinfo = FileInfo(filename)
  file_path_from_root = fileinfo.RepositoryName()
  if _root:
    suffix = os.sep
    # On Windows using directory separator will leave us with
    # "bogus escape error" unless we properly escape regex.
    if suffix == '\\':
      suffix += '\\'
    file_path_from_root = re.sub('^' + _root + suffix, '',
                                 file_path_from_root)
  return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
170,150
Checks that the file contains a header guard. Logs an error if no #ifndef header guard is present. For other headers, checks that the full pathname is used. Args: filename: The name of the C++ header file. clean_lines: A CleansedLines instance containing the file. error: The function to call with any errors found.
def CheckForHeaderGuard(filename, clean_lines, error):
  # Don't check for header guards if there are error suppression
  # comments somewhere in this file.
  #
  # Because this is silencing a warning for a nonexistent line, we
  # only support the very specific NOLINT(build/header_guard) syntax,
  # and not the general NOLINT or NOLINT(*) syntax.
  raw_lines = clean_lines.lines_without_raw_strings
  for i in raw_lines:
    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
      return

  # Allow pragma once instead of header guards
  for i in raw_lines:
    if Search(r'^\s*#pragma\s+once', i):
      return

  cppvar = GetHeaderGuardCPPVariable(filename)

  ifndef = ''
  ifndef_linenum = 0
  define = ''
  endif = ''
  endif_linenum = 0
  for linenum, line in enumerate(raw_lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum

  if not ifndef or not define or ifndef != define:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5

    ParseNolintSuppressions(filename, raw_lines[ifndef_linenum],
                            ifndef_linenum, error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)

  # Check for "//" comments on endif line.
  ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
                          error)
  match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
  if match:
    if match.group(1) == '_':
      # Issue low severity warning for deprecated double trailing underscore
      error(filename, endif_linenum, 'build/header_guard', 0,
            '#endif line should be "#endif // %s"' % cppvar)
    return

  # Didn't find the corresponding "//" comment.  If this file does not
  # contain any "//" comments at all, it could be that the compiler
  # only wants "/**/" comments, look for those instead.
  no_single_line_comments = True
  for i in xrange(1, len(raw_lines) - 1):
    line = raw_lines[i]
    if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
      no_single_line_comments = False
      break

  if no_single_line_comments:
    match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
    if match:
      if match.group(1) == '_':
        # Low severity warning for double trailing underscore
        error(filename, endif_linenum, 'build/header_guard', 0,
              '#endif line should be "#endif /* %s */"' % cppvar)
      return

  # Didn't find anything
  error(filename, endif_linenum, 'build/header_guard', 5,
        '#endif line should be "#endif // %s"' % cppvar)
170,151
Logs an error if there is no newline char at the end of the file. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found.
def CheckForNewlineAtEOF(filename, lines, error):
  # The array lines() was created by adding two newlines to the
  # original file (go figure), then splitting on \n.
  # To verify that the file ends in \n, we just have to make sure the
  # last-but-two element of lines() exists and is empty.
  if len(lines) < 3 or lines[-2]:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')
170,154
Checks that VLOG() is only used for defining a logging level. For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and VLOG(FATAL) are not. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckVlogArguments(filename, clean_lines, linenum, error):
  line = clean_lines.elided[linenum]
  if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
    error(filename, linenum, 'runtime/vlog', 5,
          'VLOG() should be used with numeric verbosity level.  '
          'Use LOG() if you want symbolic severity levels.')
170,157
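The check reduces to one regex; exercising it on hypothetical lines shows which forms are flagged:

import re

pattern = r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)'
print(bool(re.search(pattern, 'VLOG(ERROR) << "oops";')))  # True  -- flagged
print(bool(re.search(pattern, 'VLOG(2) << "detail";')))    # False -- numeric level OK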
Checks for invalid increment *count++. For example following function: void increment_counter(int* count) { *count++; } is invalid, because it effectively does count++, moving pointer, and should be replaced with ++*count, (*count)++ or *count += 1. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  line = clean_lines.elided[linenum]
  if _RE_PATTERN_INVALID_INCREMENT.match(line):
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')
170,158
Checks for the correctness of various spacing around function calls. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
  line = clean_lines.elided[linenum]

  # Since function calls often occur inside if/for/while/switch
  # expressions - which have their own, more liberal conventions - we
  # first see if we should be looking inside such an expression for a
  # function call, to which we can apply more strict standards.
  fncall = line    # if there's no control flow construct, look at whole line
  for pattern in (r'\bif\s*\((.*)\)\s*{',
                  r'\bfor\s*\((.*)\)\s*{',
                  r'\bwhile\s*\((.*)\)\s*[{;]',
                  r'\bswitch\s*\((.*)\)\s*{'):
    match = Search(pattern, line)
    if match:
      fncall = match.group(1)    # look inside the parens for function calls
      break

  # Except in if/for/while/switch, there should never be space
  # immediately inside parens (eg "f( 3, 4 )").  We make an exception
  # for nested parens ( (a+b) + c ).  Likewise, there should never be
  # a space before a ( when it's a function argument.  I assume it's a
  # function argument when the char before the whitespace is legal in
  # a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions coz they're too tricky:
  # we use a very simple way to recognize these:
  #   " (something)(maybe-something)" or
  #   " (something)(maybe-something," or
  #   " (something)[something]"
  # Note that we assume the contents of [] to be short enough that
  # they'll never need to wrap.
  if (  # Ignore control structures.
      not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
                 fncall) and
      # Ignore pointers/references to functions.
      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
      # Ignore pointers/references to arrays.
      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
      error(filename, linenum, 'whitespace/parens', 4,
            'Extra space after ( in function call')
    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space after (')
    if (Search(r'\w\s+\(', fncall) and
        not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
        not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
        not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
        not Search(r'\bcase\s+\(', fncall)):
      # TODO(unknown): Space after an operator function seem to be a common
      # error, silence those for now by restricting them to highest verbosity.
      if Search(r'\boperator_*\b', line):
        error(filename, linenum, 'whitespace/parens', 0,
              'Extra space before ( in function call')
      else:
        error(filename, linenum, 'whitespace/parens', 4,
              'Extra space before ( in function call')
  # If the ) is followed only by a newline or a { + newline, assume it's
  # part of a control statement (if/while/etc), and don't complain
  if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
    # If the closing parenthesis is preceded by only whitespaces,
    # try to give a more descriptive error message.
    if Search(r'^\s+\)', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Closing ) should be moved to the previous line')
    else:
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space before )')
170,160
Checks for common mistakes in comments. Args: line: The line in question. filename: The name of the current file. linenum: The number of the line to check. next_line_start: The first non-whitespace column of the next line. error: The function to call with any errors found.
def CheckComment(line, filename, linenum, next_line_start, error):
  commentpos = line.find('//')
  if commentpos != -1:
    # Check if the // may be in quotes.  If so, ignore it
    if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
      # Allow one space for new scopes, two spaces otherwise:
      if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
          ((commentpos >= 1 and
            line[commentpos-1] not in string.whitespace) or
           (commentpos >= 2 and
            line[commentpos-2] not in string.whitespace))):
        error(filename, linenum, 'whitespace/comments', 2,
              'At least two spaces is best between code and comments')

      # Checks for common mistakes in TODO comments.
      comment = line[commentpos:]
      match = _RE_PATTERN_TODO.match(comment)
      if match:
        # One whitespace is correct; zero whitespace is handled elsewhere.
        leading_whitespace = match.group(1)
        if len(leading_whitespace) > 1:
          error(filename, linenum, 'whitespace/todo', 2,
                'Too many spaces before TODO')

        username = match.group(2)
        if not username:
          error(filename, linenum, 'readability/todo', 2,
                'Missing username in TODO; it should look like '
                '"// TODO(my_username): Stuff."')

        middle_whitespace = match.group(3)
        # Comparisons made explicit for correctness
        #   -- pylint: disable=g-explicit-bool-comparison
        if middle_whitespace != ' ' and middle_whitespace != '':
          error(filename, linenum, 'whitespace/todo', 2,
                'TODO(my_username) should be followed by a space')

      # If the comment contains an alphanumeric character, there
      # should be a space somewhere between it and the // unless
      # it's a /// or //! Doxygen comment.
      if (Match(r'//[^ ]*\w', comment) and
          not Match(r'(///|//\!)(\s+|$)', comment)):
        error(filename, linenum, 'whitespace/comments', 4,
              'Should have a space between // and comment')
170,163
Checks for improper use of DISALLOW* macros. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found.
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
  line = clean_lines.elided[linenum]  # get rid of comments and strings

  matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
                   r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
  if not matched:
    return
  if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
    if nesting_state.stack[-1].access != 'private':
      error(filename, linenum, 'readability/constructors', 3,
            '%s must be in the private: section' % matched.group(1))
  else:
    # Found DISALLOW* macro outside a class declaration, or perhaps it
    # was used inside a function when it should have been part of the
    # class declaration.  We could issue a warning here, but it
    # probably resulted in a compiler error already.
    pass
170,164
Checks for horizontal spacing around parentheses. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
  line = clean_lines.elided[linenum]

  # No spaces after an if, while, switch, or for
  match = Search(r' (if\(|for\(|while\(|switch\()', line)
  if match:
    error(filename, linenum, 'whitespace/parens', 5,
          'Missing space before ( in %s' % match.group(1))

  # For if/for/while/switch, the left and right parens should be
  # consistent about how many spaces are inside the parens, and
  # there should either be zero or one spaces inside the parens.
  # We don't want: "if ( foo)" or "if ( foo )".
  # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
  match = Search(r'\b(if|for|while|switch)\s*'
                 r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
                 line)
  if match:
    if len(match.group(2)) != len(match.group(4)):
      if not (match.group(3) == ';' and
              len(match.group(2)) == 1 + len(match.group(4)) or
              not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
        error(filename, linenum, 'whitespace/parens', 5,
              'Mismatching spaces inside () in %s' % match.group(1))
    if len(match.group(2)) not in [0, 1]:
      error(filename, linenum, 'whitespace/parens', 5,
            'Should have zero or one spaces inside ( and ) in %s' %
            match.group(1))
170,166
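Both regexes can be tried in isolation; on the hypothetical lines below, the first catches a missing space before (, and the second measures the space counts just inside the parens:

import re

print(re.search(r' (if\(|for\(|while\(|switch\()',
                'x = 1; if(x) {').group(1))       # if(
m = re.search(r'\b(if|for|while|switch)\s*'
              r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
              'if ( foo) {')
print(len(m.group(2)), len(m.group(4)))           # 1 0 -- mismatched spaces inside ()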
Checks for horizontal spacing near commas and semicolons. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckCommaSpacing(filename, clean_lines, linenum, error):
  raw = clean_lines.lines_without_raw_strings
  line = clean_lines.elided[linenum]

  # You should always have a space after a comma (either as fn arg or operator)
  #
  # This does not apply when the non-space character following the
  # comma is another comma, since the only time when that happens is
  # for empty macro arguments.
  #
  # We run this check in two passes: first pass on elided lines to
  # verify that lines contain missing whitespaces, second pass on raw
  # lines to confirm that those missing whitespaces are not due to
  # elided comments.
  if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
      Search(r',[^,\s]', raw[linenum])):
    error(filename, linenum, 'whitespace/comma', 3,
          'Missing space after ,')

  # You should always have a space after a semicolon
  # except for few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more
  # space after ;
  if Search(r';[^\s};\\)/]', line):
    error(filename, linenum, 'whitespace/semicolon', 3,
          'Missing space after ;')
170,167
Check if expression looks like a type name, returns true if so. Args: clean_lines: A CleansedLines instance containing the file. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. expr: The expression to check. Returns: True, if token looks like a type.
def _IsType(clean_lines, nesting_state, expr):
  # Keep only the last token in the expression
  last_word = Match(r'^.*(\b\S+)$', expr)
  if last_word:
    token = last_word.group(1)
  else:
    token = expr

  # Match native types and stdint types
  if _TYPES.match(token):
    return True

  # Try a bit harder to match templated types.  Walk up the nesting
  # stack until we find something that resembles a typename
  # declaration for what we are looking for.
  typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
                      r'\b')
  block_index = len(nesting_state.stack) - 1
  while block_index >= 0:
    if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
      return False

    # Found where the opening brace is.  We want to scan from this
    # line up to the beginning of the function, minus a few lines.
    #   template <typename Type1,  // stop scanning here
    #             ...>
    #   class C
    #     : public ... {  // start scanning here
    last_line = nesting_state.stack[block_index].starting_linenum

    next_block_start = 0
    if block_index > 0:
      next_block_start = nesting_state.stack[block_index - 1].starting_linenum
    first_line = last_line
    while first_line >= next_block_start:
      if clean_lines.elided[first_line].find('template') >= 0:
        break
      first_line -= 1
    if first_line < next_block_start:
      # Didn't find any "template" keyword before reaching the next block,
      # there are probably no template things to check for this block
      block_index -= 1
      continue

    # Look for typename in the specified range
    for i in xrange(first_line, last_line + 1, 1):
      if Search(typename_pattern, clean_lines.elided[i]):
        return True
    block_index -= 1

  return False
170,168
Checks for horizontal spacing near braces and semicolons. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found.
def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
  line = clean_lines.elided[linenum]

  # Except after an opening paren, or after another opening brace (in case of
  # an initializer list, for instance), you should have spaces before your
  # braces when they are delimiting blocks, classes, namespaces etc.
  # And since you should never have braces at the beginning of a line,
  # this is an easy test.  Except that braces used for initialization don't
  # follow the same rule; we often don't want spaces before those.
  match = Match(r'^(.*[^ ({>]){', line)

  if match:
    # Try a bit harder to check for brace initialization.  This
    # happens in one of the following forms:
    #   Constructor() : initializer_list_{} { ... }
    #   Constructor{}.MemberFunction()
    #   Type variable{};
    #   FunctionCall(type{}, ...);
    #   LastArgument(..., type{});
    #   LOG(INFO) << type{} << " ...";
    #   map_of_type[{...}] = ...;
    #   ternary = expr ? new type{} : nullptr;
    #   OuterTemplate<InnerTemplateConstructor<Type>{}>
    #
    # We check for the character following the closing brace, and
    # silence the warning if it's one of those listed above, i.e.
    # "{.;,)<>]:".
    #
    # To account for nested initializer list, we allow any number of
    # closing braces up to "{;,)<".  We can't simply silence the
    # warning on first sight of closing brace, because that would
    # cause false negatives for things that are not initializer lists.
    #   Silence this:         But not this:
    #     Outer{                if (...) {
    #       Inner{...}            if (...){  // Missing space before {
    #     };                    }
    #
    # There is a false negative with this approach if people inserted
    # spurious semicolons, e.g. "if (cond){};", but we will catch the
    # spurious semicolon with a separate check.
    leading_text = match.group(1)
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    trailing_text = ''
    if endpos > -1:
      trailing_text = endline[endpos:]
    for offset in xrange(endlinenum + 1,
                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
      trailing_text += clean_lines.elided[offset]
    # We also suppress warnings for `uint64_t{expression}` etc., as the style
    # guide recommends brace initialization for integral types to avoid
    # overflow/truncation.
    if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text) and
        not _IsType(clean_lines, nesting_state, leading_text)):
      error(filename, linenum, 'whitespace/braces', 5,
            'Missing space before {')

  # Make sure '} else {' has spaces.
  if Search(r'}else', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Missing space before else')

  # You shouldn't have a space before a semicolon at the end of the line.
  # There's a special case for "for" since the style guide allows space before
  # the semicolon there.
  if Search(r':\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Semicolon defining empty statement. Use {} instead.')
  elif Search(r'^\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Line contains only semicolon. If this should be an empty statement, '
          'use {} instead.')
  elif (Search(r'\s+;\s*$', line) and
        not Search(r'\bfor\b', line)):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Extra space before last semicolon. If this should be an empty '
          'statement, use {} instead.')
170,169
Check if the token ending on (linenum, column) is decltype(). Args: clean_lines: A CleansedLines instance containing the file. linenum: the number of the line to check. column: end column of the token to check. Returns: True if this token is decltype() expression, False otherwise.
def IsDecltype(clean_lines, linenum, column):
  (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
  if start_col < 0:
    return False
  if Search(r'\bdecltype\s*$', text[0:start_col]):
    return True
  return False
170,170
Checks for additional blank line issues related to sections. Currently the only thing checked here is blank line before protected/private. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. class_info: A _ClassInfo objects. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
  # Skip checks if the class is small, where small means 25 lines or less.
  # 25 lines seems like a good cutoff since that's the usual height of
  # terminals, and any class that can't fit in one screen can't really
  # be considered "small".
  #
  # Also skip checks if we are on the first line.  This accounts for
  # classes that look like
  #   class Foo { public: ... };
  #
  # If we didn't find the end of the class, last_line would be zero,
  # and the check will be skipped by the first condition.
  if (class_info.last_line - class_info.starting_linenum <= 24 or
      linenum <= class_info.starting_linenum):
    return

  matched = Match(r'\s*(public|protected|private):',
                  clean_lines.lines[linenum])
  if matched:
    # Issue warning if the line before public/protected/private was
    # not a blank line, but don't do this if the previous line contains
    # "class" or "struct".  This can happen two ways:
    #  - We are at the beginning of the class.
    #  - We are forward-declaring an inner class that is semantically
    #    private, but needed to be public for implementation reasons.
    # Also ignores cases where the previous line ends with a backslash as can
    # be common when defining classes in C macros.
    prev_line = clean_lines.lines[linenum - 1]
    if (not IsBlankLine(prev_line) and
        not Search(r'\b(class|struct)\b', prev_line) and
        not Search(r'\\$', prev_line)):
      # Try a bit harder to find the beginning of the class.  This is to
      # account for multi-line base-specifier lists, e.g.:
      #   class Derived
      #       : public Base {
      end_class_head = class_info.starting_linenum
      for i in range(class_info.starting_linenum, linenum):
        if Search(r'\{\s*$', clean_lines.lines[i]):
          end_class_head = i
          break
      if end_class_head < linenum - 1:
        error(filename, linenum, 'whitespace/blank_line', 3,
              '"%s:" should be preceded by a blank line' % matched.group(1))
170,171
Return the most recent non-blank line and its line number. Args: clean_lines: A CleansedLines instance containing the file contents. linenum: The number of the line to check. Returns: A tuple with two elements. The first element is the contents of the last non-blank line before the current line, or the empty string if this is the first non-blank line. The second is the line number of that line, or -1 if this is the first non-blank line.
def GetPreviousNonBlankLine(clean_lines, linenum):
  prevlinenum = linenum - 1
  while prevlinenum >= 0:
    prevline = clean_lines.elided[prevlinenum]
    if not IsBlankLine(prevline):     # if not a blank line...
      return (prevline, prevlinenum)
    prevlinenum -= 1
  return ('', -1)
170,172
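A self-contained rendition of the same backward walk, with a plain strip() standing in for IsBlankLine:

def get_previous_non_blank_line(elided, linenum):
    prevlinenum = linenum - 1
    while prevlinenum >= 0:
        prevline = elided[prevlinenum]
        if prevline.strip():               # stand-in for not IsBlankLine()
            return (prevline, prevlinenum)
        prevlinenum -= 1
    return ('', -1)

lines = ['int a;', '', '  ', 'int b;']
print(get_previous_non_blank_line(lines, 3))   # ('int a;', 0)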
Looks for redundant trailing semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
  line = clean_lines.elided[linenum]

  # Block bodies should not be followed by a semicolon.  Due to C++11
  # brace initialization, there are more places where semicolons are
  # required than not, so we use a whitelist approach to check these
  # rather than a blacklist.  These are the places where "};" should
  # be replaced by just "}":
  # 1. Some flavor of block following closing parenthesis:
  #    for (;;) {};
  #    while (...) {};
  #    switch (...) {};
  #    Function(...) {};
  #    if (...) {};
  #    if (...) else if (...) {};
  #
  # 2. else block:
  #    if (...) else {};
  #
  # 3. const member function:
  #    Function(...) const {};
  #
  # 4. Block following some statement:
  #    x = 42;
  #    {};
  #
  # 5. Block at the beginning of a function:
  #    Function(...) {
  #      {};
  #    }
  #
  #    Note that naively checking for the preceding "{" will also match
  #    braces inside multi-dimensional arrays, but this is fine since
  #    that expression will not contain semicolons.
  #
  # 6. Block following another block:
  #    while (true) {}
  #    {};
  #
  # 7. End of namespaces:
  #    namespace {};
  #
  #    These semicolons seems far more common than other kinds of
  #    redundant semicolons, possibly due to people converting classes
  #    to namespaces.  For now we do not warn for this case.
  #
  # Try matching case 1 first.
  match = Match(r'^(.*\)\s*)\{', line)
  if match:
    # Matched closing parenthesis (case 1).  Check the token before the
    # matching opening parenthesis, and don't warn if it looks like a
    # macro.  This avoids these false positives:
    #  - macro that defines a base class
    #  - multi-line macro that defines a base class
    #  - macro that defines the whole class-head
    #
    # But we still issue warnings for macros that we know are safe to
    # warn, specifically:
    #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
    #  - TYPED_TEST
    #  - INTERFACE_DEF
    #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
    #
    # We implement a whitelist of safe macros instead of a blacklist of
    # unsafe macros, even though the latter appears less frequently in
    # google code and would have been easier to implement.  This is because
    # the downside for getting the whitelist wrong means some extra
    # semicolons, while the downside for getting the blacklist wrong
    # would result in compile errors.
    #
    # In addition to macros, we also don't want to warn on
    #  - Compound literals
    #  - Lambdas
    #  - alignas specifier with anonymous structs
    #  - decltype
    closing_brace_pos = match.group(1).rfind(')')
    opening_parenthesis = ReverseCloseExpression(
        clean_lines, linenum, closing_brace_pos)
    if opening_parenthesis[2] > -1:
      line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
      macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
      func = Match(r'^(.*\])\s*$', line_prefix)
      if ((macro and
           macro.group(1) not in (
               'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
               'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
               'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
          (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
          Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
          Search(r'\bdecltype$', line_prefix) or
          Search(r'\s+=\s*$', line_prefix)):
        match = None
    if (match and
        opening_parenthesis[1] > 1 and
        Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
      # Multi-line lambda-expression
      match = None

  else:
    # Try matching cases 2-3.
    match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
    if not match:
      # Try matching cases 4-6.  These are always matched on separate lines.
      #
      # Note that we can't simply concatenate the previous line to the
      # current line and do a single match, otherwise we may output
      # duplicate warnings for the blank line case:
      #   if (cond) {
      #     // blank line
      #   }
      prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
      if prevline and Search(r'[;{}]\s*$', prevline):
        match = Match(r'^(\s*)\{', line)

  # Check matching closing brace
  if match:
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
      # Current {} pair is eligible for semicolon check, and we have found
      # the redundant semicolon, output warning here.
      #
      # Note: because we are scanning forward for opening braces, and
      # outputting warnings for the matching closing brace, if there are
      # nested blocks with trailing semicolons, we will get the error
      # messages in reversed order.

      # We need to check the line forward for NOLINT
      raw_lines = clean_lines.raw_lines
      ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
                              error)
      ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
                              error)

      error(filename, endlinenum, 'readability/braces', 4,
            "You don't need a ; after a }")
170,173
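A minimal, self-contained sketch of case 1 above, run on hypothetical sample lines. Only the core regex heuristic is shown; the real check also rules out macros, lambdas, compound literals, and multi-line constructs via ReverseCloseExpression.

import re

# Flag "};" where the "{" follows a closing parenthesis (case 1).
for line in ['for (;;) {};', 'switch (x) {};', 'if (x) { f(); }']:
    match = re.match(r'^(.*\)\s*)\{.*\};', line)
    print(line, '->', 'warn' if match else 'ok')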
Look for empty loop/conditional body with only a single semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckEmptyBlockBody(filename, clean_lines, linenum, error): # Search for loop keywords at the beginning of the line. Because only # whitespaces are allowed before the keywords, this will also ignore most # do-while-loops, since those lines should start with closing brace. # # We also check "if" blocks here, since an empty conditional block # is likely an error. line = clean_lines.elided[linenum] matched = Match(r'\s*(for|while|if)\s*\(', line) if matched: # Find the end of the conditional expression. (end_line, end_linenum, end_pos) = CloseExpression( clean_lines, linenum, line.find('(')) # Output warning if what follows the condition expression is a semicolon. # No warning for all other cases, including whitespace or newline, since we # have a separate check for semicolons preceded by whitespace. if end_pos >= 0 and Match(r';', end_line[end_pos:]): if matched.group(1) == 'if': error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, 'Empty conditional bodies should use {}') else: error(filename, end_linenum, 'whitespace/empty_loop_body', 5, 'Empty loop bodies should use {} or continue') # Check for if statements that have completely empty bodies (no comments) # and no else clauses. if end_pos >= 0 and matched.group(1) == 'if': # Find the position of the opening { for the if statement. # Return without logging an error if it has no brackets. opening_linenum = end_linenum opening_line_fragment = end_line[end_pos:] # Loop until EOF or find anything that's not whitespace or opening {. while not Search(r'^\s*\{', opening_line_fragment): if Search(r'^(?!\s*$)', opening_line_fragment): # Conditional has no brackets. return opening_linenum += 1 if opening_linenum == len(clean_lines.elided): # Couldn't find conditional's opening { or any code before EOF. return opening_line_fragment = clean_lines.elided[opening_linenum] # Set opening_line (opening_line_fragment may not be entire opening line). opening_line = clean_lines.elided[opening_linenum] # Find the position of the closing }. opening_pos = opening_line_fragment.find('{') if opening_linenum == end_linenum: # We need to make opening_pos relative to the start of the entire line. opening_pos += end_pos (closing_line, closing_linenum, closing_pos) = CloseExpression( clean_lines, opening_linenum, opening_pos) if closing_pos < 0: return # Now construct the body of the conditional. This consists of the portion # of the opening line after the {, all lines until the closing line, # and the portion of the closing line before the }. if (clean_lines.raw_lines[opening_linenum] != CleanseComments(clean_lines.raw_lines[opening_linenum])): # Opening line ends with a comment, so conditional isn't empty. return if closing_linenum > opening_linenum: # Opening line after the {. Ignore comments here since we checked above. bodylist = list(opening_line[opening_pos+1:]) # All lines until closing line, excluding closing line, with comments. bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum]) # Closing line before the }. Won't (and can't) have comments. bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1]) body = '\n'.join(bodylist) else: # If statement has brackets and fits on a single line. body = opening_line[opening_pos+1:closing_pos-1] # Check if the body is empty if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body): return # The body is empty. Now make sure there's not an else clause. 
current_linenum = closing_linenum current_line_fragment = closing_line[closing_pos:] # Loop until EOF or find anything that's not whitespace or else clause. while Search(r'^\s*$|^(?=\s*else)', current_line_fragment): if Search(r'^(?=\s*else)', current_line_fragment): # Found an else clause, so don't log an error. return current_linenum += 1 if current_linenum == len(clean_lines.elided): break current_line_fragment = clean_lines.elided[current_linenum] # The body is empty and there's no else clause until EOF or other code. error(filename, end_linenum, 'whitespace/empty_if_body', 4, ('If statement had no body and no else clause'))
170,174
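A rough sketch of the first warning above, on hypothetical sample lines. The real check finds the ')' that matches the condition's '(' with CloseExpression; this sketch only inspects the end of the line.

import re

for line in ['while (Poll());', 'if (ready);', 'for (;;) sum += i;']:
    keyword = re.match(r'\s*(for|while|if)\s*\(', line)
    if keyword and re.search(r'\)\s*;\s*$', line):
        if keyword.group(1) == 'if':
            print(f'{line!r}: empty conditional bodies should use {{}}')
        else:
            print(f'{line!r}: empty loop bodies should use {{}} or continue')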
Find a replaceable CHECK-like macro. Args: line: line to search on. Returns: (macro name, start position), or (None, -1) if no replaceable macro is found.
def FindCheckMacro(line): for macro in _CHECK_MACROS: i = line.find(macro) if i >= 0: # Find opening parenthesis. Do a regular expression match here # to make sure that we are matching the expected CHECK macro, as # opposed to some other macro that happens to contain the CHECK # substring. matched = Match(r'^(.*\b' + macro + r'\s*)\(', line) if not matched: continue return (macro, len(matched.group(1))) return (None, -1)
170,175
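A self-contained demo of the lookup logic with a cut-down macro list (the real _CHECK_MACROS also covers the EXPECT_*/ASSERT_* families). The \b in the pattern is what rejects identifiers that merely contain a macro name.

import re

_CHECK_MACROS = ['DCHECK', 'CHECK', 'EXPECT_TRUE', 'ASSERT_TRUE']

def find_check_macro(line):
    for macro in _CHECK_MACROS:
        if line.find(macro) >= 0:
            matched = re.match(r'^(.*\b' + macro + r'\s*)\(', line)
            if matched:
                return (macro, len(matched.group(1)))
    return (None, -1)

print(find_check_macro('  CHECK(a == b);'))     # ('CHECK', 7)
print(find_check_macro('  MY_CHECKER(a, b);'))  # (None, -1)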
Checks the use of CHECK and EXPECT macros. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckCheck(filename, clean_lines, linenum, error): # Decide the set of replacement macros that should be suggested lines = clean_lines.elided (check_macro, start_pos) = FindCheckMacro(lines[linenum]) if not check_macro: return # Find end of the boolean expression by matching parentheses (last_line, end_line, end_pos) = CloseExpression( clean_lines, linenum, start_pos) if end_pos < 0: return # If the check macro is followed by something other than a # semicolon, assume users will log their own custom error messages # and don't suggest any replacements. if not Match(r'\s*;', last_line[end_pos:]): return if linenum == end_line: expression = lines[linenum][start_pos + 1:end_pos - 1] else: expression = lines[linenum][start_pos + 1:] for i in xrange(linenum + 1, end_line): expression += lines[i] expression += last_line[0:end_pos - 1] # Parse expression so that we can take parentheses into account. # This avoids false positives for inputs like "CHECK((a < 4) == b)", # which is not replaceable by CHECK_LE. lhs = '' rhs = '' operator = None while expression: matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||' r'==|!=|>=|>|<=|<|\()(.*)$', expression) if matched: token = matched.group(1) if token == '(': # Parenthesized operand expression = matched.group(2) (end, _) = FindEndOfExpressionInLine(expression, 0, ['(']) if end < 0: return # Unmatched parenthesis lhs += '(' + expression[0:end] expression = expression[end:] elif token in ('&&', '||'): # Logical and/or operators. This means the expression # contains more than one term, for example: # CHECK(42 < a && a < b); # # These are not replaceable with CHECK_LE, so bail out early. return elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'): # Non-relational operator lhs += token expression = matched.group(2) else: # Relational operator operator = token rhs = matched.group(2) break else: # Unparenthesized operand. Instead of appending to lhs one character # at a time, we do another regular expression match to consume several # characters at once if possible. Trivial benchmark shows that this # is more efficient when the operands are longer than a single # character, which is generally the case. matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression) if not matched: matched = Match(r'^(\s*\S)(.*)$', expression) if not matched: break lhs += matched.group(1) expression = matched.group(2) # Only apply checks if we got all parts of the boolean expression if not (lhs and operator and rhs): return # Check that rhs do not contain logical operators. We already know # that lhs is fine since the loop above parses out && and ||. if rhs.find('&&') > -1 or rhs.find('||') > -1: return # At least one of the operands must be a constant literal. This is # to avoid suggesting replacements for unprintable things like # CHECK(variable != iterator) # # The following pattern matches decimal, hex integers, strings, and # characters (in that order). lhs = lhs.strip() rhs = rhs.strip() match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$' if Match(match_constant, lhs) or Match(match_constant, rhs): # Note: since we know both lhs and rhs, we can provide a more # descriptive error message like: # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42) # Instead of: # Consider using CHECK_EQ instead of CHECK(a == b) # # We are still keeping the less descriptive message because if lhs # or rhs gets long, the error message might become unreadable. 
error(filename, linenum, 'readability/check', 2, 'Consider using %s instead of %s(a %s b)' % ( _CHECK_REPLACEMENT[check_macro][operator], check_macro, operator))
170,176
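The suggestion at the end reads from a macro-and-operator table; a toy version of that table is sketched below (the real _CHECK_REPLACEMENT also covers the EXPECT_*/ASSERT_*/DCHECK variants).

_CHECK_REPLACEMENT = {'CHECK': {'==': 'CHECK_EQ', '!=': 'CHECK_NE',
                                '>=': 'CHECK_GE', '>': 'CHECK_GT',
                                '<=': 'CHECK_LE', '<': 'CHECK_LT'}}

check_macro, operator = 'CHECK', '=='
print('Consider using %s instead of %s(a %s b)' % (
    _CHECK_REPLACEMENT[check_macro][operator], check_macro, operator))
# Consider using CHECK_EQ instead of CHECK(a == b)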
Check alternative keywords being used in boolean expressions. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckAltTokens(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Avoid preprocessor lines if Match(r'^\s*#', line): return # Last ditch effort to avoid multi-line comments. This will not help # if the comment started before the current line or ended after the # current line, but it catches most of the false positives. At least, # it provides a way to work around this warning for people who use # multi-line comments in preprocessor macros. # # TODO(unknown): remove this once cpplint has better support for # multi-line comments. if line.find('/*') >= 0 or line.find('*/') >= 0: return for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): error(filename, linenum, 'readability/alt_tokens', 2, 'Use operator %s instead of %s' % ( _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
170,177
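A self-contained sketch with a three-entry token table; cpplint's real _ALT_TOKEN_REPLACEMENT also maps bitand, bitor, xor, compl, and the *_eq forms, and its pattern uses extra lookarounds to avoid matching identifiers.

import re

_ALT_TOKEN_REPLACEMENT = {'and': '&&', 'or': '||', 'not': '!'}
pattern = re.compile(r'\b(and|or|not)\b')

for match in pattern.finditer('if (a and not b) return;'):
    print('Use operator %s instead of %s' % (
        _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))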
Determines the width of the line in column positions. Args: line: A string, which may be a Unicode string. Returns: The width of the line in column positions, accounting for Unicode combining characters and wide characters.
def GetLineWidth(line): if isinstance(line, unicode): width = 0 for uc in unicodedata.normalize('NFC', line): if unicodedata.east_asian_width(uc) in ('W', 'F'): width += 2 elif not unicodedata.combining(uc): width += 1 return width else: return len(line)
170,178
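The same width rules, written for Python 3 (where every str is Unicode) so it runs as-is: wide and fullwidth East Asian characters count two columns, combining marks count zero.

import unicodedata

def line_width(line):
    width = 0
    for ch in unicodedata.normalize('NFC', line):
        if unicodedata.east_asian_width(ch) in ('W', 'F'):
            width += 2
        elif not unicodedata.combining(ch):
            width += 1
    return width

print(line_width('int x;'))              # 6
print(line_width('\u4f8b\u3048\u3070'))  # 6: three wide CJK characters
print(line_width('e\u0301'))             # 1: 'e' plus a combining accent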
Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed.
def _DropCommonSuffixes(filename): for suffix in itertools.chain( ('%s.%s' % (test_suffix.lstrip('_'), ext) for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())), ('%s.%s' % (suffix, ext) for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))): if (filename.endswith(suffix) and len(filename) > len(suffix) and filename[-len(suffix) - 1] in ('-', '_')): return filename[:-len(suffix) - 1] return os.path.splitext(filename)[0]
170,179
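A simplified, self-contained version with the suffix list hard-coded; the real function derives it from cpplint's configurable test suffixes and header/source extensions.

import os

def drop_common_suffixes(filename):
    for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
                   'inl.h', 'imp.h', 'internal.h'):
        # The suffix only counts if it is joined to the stem by '-' or '_'.
        if (filename.endswith(suffix) and len(filename) > len(suffix) and
                filename[-len(suffix) - 1] in ('-', '_')):
            return filename[:-len(suffix) - 1]
    return os.path.splitext(filename)[0]

print(drop_common_suffixes('foo/foo-inl.h'))    # foo/foo
print(drop_common_suffixes('foo/bar_test.cc'))  # foo/bar
print(drop_common_suffixes('foo/foo.cc'))       # foo/foo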
Check for unsafe global or static objects. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckGlobalStatic(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Match two lines at a time to support multiline declarations if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line): line += clean_lines.elided[linenum + 1].strip() # Check for people declaring static/global STL strings at the top level. # This is dangerous because the C++ language does not guarantee that # globals with constructors are initialized before the first access, and # also because globals can be destroyed when some threads are still running. # TODO(unknown): Generalize this to also find static unique_ptr instances. # TODO(unknown): File bugs for clang-tidy to find these. match = Match( r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +' r'([a-zA-Z0-9_:]+)\b(.*)', line) # Remove false positives: # - String pointers (as opposed to values). # string *pointer # const string *pointer # string const *pointer # string *const pointer # # - Functions and template specializations. # string Function<Type>(... # string Class<Type>::Method(... # # - Operators. These are matched separately because operator names # cross non-word boundaries, and trying to match both operators # and functions at the same time would decrease accuracy of # matching identifiers. # string Class::operator*() if (match and not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and not Search(r'\boperator\W', line) and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))): if Search(r'\bconst\b', line): error(filename, linenum, 'runtime/string', 4, 'For a static/global string constant, use a C style string ' 'instead: "%schar%s %s[]".' % (match.group(1), match.group(2) or '', match.group(3))) else: error(filename, linenum, 'runtime/string', 4, 'Static/global string variables are not permitted.') if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)): error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.')
170,184
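The core declaration regex, exercised on three hypothetical lines; the surrounding function then filters out pointers, functions, and operators as described in its comments.

import re

pattern = re.compile(
    r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
    r'([a-zA-Z0-9_:]+)\b(.*)')
for line in ['static const string kName = "x";',
             'const string kPath("p");',
             'string *ptr;']:
    match = pattern.match(line)
    print(line, '->', match.group(3) if match else None)
# kName and kPath match; the pointer declaration does not.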
Check for printf related issues. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckPrintf(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # When snprintf is used, the second argument shouldn't be a literal. match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) if match and match.group(2) != '0': # If 2nd arg is zero, snprintf is used to calculate size. error(filename, linenum, 'runtime/printf', 3, 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' 'to snprintf.' % (match.group(1), match.group(2))) # Check if some verboten C functions are being used. if Search(r'\bsprintf\s*\(', line): error(filename, linenum, 'runtime/printf', 5, 'Never use sprintf. Use snprintf instead.') match = Search(r'\b(strcpy|strcat)\s*\(', line) if match: error(filename, linenum, 'runtime/printf', 4, 'Almost always, snprintf is better than %s' % match.group(1))
170,185
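The snprintf size-argument heuristic in isolation, on two hypothetical calls; a literal size of 0 is exempt because that idiom computes the required buffer size.

import re

for line in ['snprintf(buf, 10, "%s", s);', 'snprintf(buf, 0, "%s", s);']:
    match = re.search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
    if match and match.group(2) != '0':
        print('use sizeof(%s) instead of %s' % (match.group(1), match.group(2)))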
Check if current line contains an inherited function. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if current line contains a function with "override" virt-specifier.
def IsDerivedFunction(clean_lines, linenum): # Scan back a few lines for start of current function for i in xrange(linenum, max(-1, linenum - 10), -1): match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i]) if match: # Look for "override" after the matching closing parenthesis line, _, closing_paren = CloseExpression( clean_lines, i, len(match.group(1))) return (closing_paren >= 0 and Search(r'\boverride\b', line[closing_paren:])) return False
170,186
Check if current line contains an out-of-line method definition. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if current line contains an out-of-line method definition.
def IsOutOfLineMethodDefinition(clean_lines, linenum): # Scan back a few lines for start of current function for i in xrange(linenum, max(-1, linenum - 10), -1): if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]): return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None return False
170,187
Check if current line is inside constructor initializer list. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if current line appears to be inside constructor initializer list, False otherwise.
def IsInitializerList(clean_lines, linenum): for i in xrange(linenum, 1, -1): line = clean_lines.elided[i] if i == linenum: remove_function_body = Match(r'^(.*)\{\s*$', line) if remove_function_body: line = remove_function_body.group(1) if Search(r'\s:\s*\w+[({]', line): # A lone colon tends to indicate the start of a constructor # initializer list. It could also be a ternary operator, which # also tends to appear in constructor initializer lists as # opposed to parameter lists. return True if Search(r'\}\s*,\s*$', line): # A closing brace followed by a comma is probably the end of a # brace-initialized member in constructor initializer list. return True if Search(r'[{};]\s*$', line): # Found one of the following: # - A closing brace or semicolon, probably the end of the previous # function. # - An opening brace, probably the start of current class or namespace. # # Current line is probably not inside an initializer list since # we saw one of those things without seeing the starting colon. return False # Got to the beginning of the file without seeing the start of # constructor initializer list. return False
170,188
Check for non-const references. Separate from CheckLanguage since it scans backwards from current line, instead of scanning forward. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found.
def CheckForNonConstReference(filename, clean_lines, linenum, nesting_state, error): # Do nothing if there is no '&' on current line. line = clean_lines.elided[linenum] if '&' not in line: return # If a function is inherited, current function doesn't have much of # a choice, so any non-const references should not be blamed on # derived function. if IsDerivedFunction(clean_lines, linenum): return # Don't warn on out-of-line method definitions, as we would warn on the # in-line declaration, if it isn't marked with 'override'. if IsOutOfLineMethodDefinition(clean_lines, linenum): return # Long type names may be broken across multiple lines, usually in one # of these forms: # LongType # ::LongTypeContinued &identifier # LongType:: # LongTypeContinued &identifier # LongType< # ...>::LongTypeContinued &identifier # # If we detected a type split across two lines, join the previous # line to current line so that we can match const references # accordingly. # # Note that this only scans back one line, since scanning back # arbitrary number of lines would be expensive. If you have a type # that spans more than 2 lines, please use a typedef. if linenum > 1: previous = None if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line): # previous_line\n + ::current_line previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$', clean_lines.elided[linenum - 1]) elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line): # previous_line::\n + current_line previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$', clean_lines.elided[linenum - 1]) if previous: line = previous.group(1) + line.lstrip() else: # Check for templated parameter that is split across multiple lines endpos = line.rfind('>') if endpos > -1: (_, startline, startpos) = ReverseCloseExpression( clean_lines, linenum, endpos) if startpos > -1 and startline < linenum: # Found the matching < on an earlier line, collect all # pieces up to current line. line = '' for i in xrange(startline, linenum + 1): line += clean_lines.elided[i].strip() # Check for non-const references in function parameters. A single '&' may be # found in the following places: # inside expression: binary & for bitwise AND # inside expression: unary & for taking the address of something # inside declarators: reference parameter # We will exclude the first two cases by checking that we are not inside a # function body, including one that was just introduced by a trailing '{'. # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare]. if (nesting_state.previous_stack_top and not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or isinstance(nesting_state.previous_stack_top, _NamespaceInfo))): # Not at toplevel, not within a class, and not within a namespace return # Avoid initializer lists. We only need to scan back from the # current line for something that starts with ':'. # # We don't need to check the current line, since the '&' would # appear inside the second set of parentheses on the current line as # opposed to the first set. if linenum > 0: for i in xrange(linenum - 1, max(0, linenum - 10), -1): previous_line = clean_lines.elided[i] if not Search(r'[),]\s*$', previous_line): break if Match(r'^\s*:\s+\S', previous_line): return # Avoid preprocessors if Search(r'\\\s*$', line): return # Avoid constructor initializer lists if IsInitializerList(clean_lines, linenum): return # We allow non-const references in a few standard places, like functions # called "swap()" or iostream operators like "<<" or ">>". Do not check # those function parameters.
# # We also accept & in static_assert, which looks like a function but # it's actually a declaration expression. whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|' r'operator\s*[<>][<>]|' r'static_assert|COMPILE_ASSERT' r')\s*\(') if Search(whitelisted_functions, line): return elif not Search(r'\S+\([^)]*$', line): # Don't see a whitelisted function on this line. Actually we # didn't see any function name on this line, so this is likely a # multi-line parameter list. Try a bit harder to catch this case. for i in xrange(2): if (linenum > i and Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])): return decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls): if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)): error(filename, linenum, 'runtime/references', 2, 'Is this a non-const reference? ' 'If so, make const or use a pointer: ' + ReplaceAll(' *<', '<', parameter))
170,189
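A toy version of the final scan: find reference parameters in a declaration string and report the non-const ones. The regex here is a simplification of _RE_PATTERN_REF_PARAM, which also handles nested templates and defaulted arguments.

import re

ref_param = re.compile(r'\b(?:const\s+)?\w+(?:<[^<>]*>)?\s*&\s*\w+')
decls = 'void F(const string &ok, vector<int> &bad)'
for match in ref_param.finditer(decls):
    param = match.group(0)
    if not param.startswith('const'):
        print('Is this a non-const reference? ->', param)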
Various cast related checks. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckCasts(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Check to see if they're using a conversion function cast. # I just try to capture the most common basic types, though there are more. # Parameterless conversion functions, such as bool(), are allowed as they are # probably a member operator declaration or default constructor. match = Search( r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b' r'(int|float|double|bool|char|int32|uint32|int64|uint64)' r'(\([^)].*)', line) expecting_function = ExpectingFunctionArgs(clean_lines, linenum) if match and not expecting_function: matched_type = match.group(2) # matched_new_or_template is used to silence two false positives: # - New operators # - Template arguments with function types # # For template arguments, we match on types immediately following # an opening bracket without any spaces. This is a fast way to # silence the common case where the function type is the first # template argument. False negative with less-than comparison is # avoided because those operators are usually followed by a space. # # function<double(double)> // bracket + no space = false positive # value < double(42) // bracket + space = true positive matched_new_or_template = match.group(1) # Avoid arrays by looking for brackets that come after the closing # parenthesis. if Match(r'\([^()]+\)\s*\[', match.group(3)): return # Other things to ignore: # - Function pointers # - Casts to pointer types # - Placement new # - Alias declarations matched_funcptr = match.group(3) if (matched_new_or_template is None and not (matched_funcptr and (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', matched_funcptr) or matched_funcptr.startswith('(*)'))) and not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and not Search(r'new\(\S+\)\s*' + matched_type, line)): error(filename, linenum, 'readability/casting', 4, 'Using deprecated casting style. ' 'Use static_cast<%s>(...) instead' % matched_type) if not expecting_function: CheckCStyleCast(filename, clean_lines, linenum, 'static_cast', r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) # This doesn't catch all cases. Consider (const char * const)"hello". # # (char *) "foo" should always be a const_cast (reinterpret_cast won't # compile). if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast', r'\((char\s?\*+\s?)\)\s*"', error): pass else: # Check pointer casts for other than string constants CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error) # In addition, we look for people taking the address of a cast. This # is dangerous -- casts can assign to temporaries, so the pointer doesn't # point where you think. # # Some non-identifier character is required before the '&' for the # expression to be recognized as a cast. These are casts: # expression = &static_cast<int*>(temporary()); # function(&(int*)(temporary())); # # This is not a cast: # reference_type&(int* function_param); match = Search( r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|' r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line) if match: # Try a better error message when the & is bound to something # dereferenced by the casted pointer, as opposed to the casted # pointer itself.
parenthesis_error = False match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line) if match: _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1))) if x1 >= 0 and clean_lines.elided[y1][x1] == '(': _, y2, x2 = CloseExpression(clean_lines, y1, x1) if x2 >= 0: extended_line = clean_lines.elided[y2][x2:] if y2 < clean_lines.NumLines() - 1: extended_line += clean_lines.elided[y2 + 1] if Match(r'\s*(?:->|\[)', extended_line): parenthesis_error = True if parenthesis_error: error(filename, linenum, 'readability/casting', 4, ('Are you taking an address of something dereferenced ' 'from a cast? Wrapping the dereferenced expression in ' 'parentheses will make the binding more obvious')) else: error(filename, linenum, 'runtime/casting', 4, ('Are you taking an address of a cast? ' 'This is dangerous: could be a temp var. ' 'Take the address before doing the cast, rather than after'))
170,190
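The deprecated-cast match reduced to its core: a basic type name used as a conversion function, e.g. int(x). Hypothetical sample lines; the real check additionally handles new-expressions, template arguments, function pointers, and alias declarations.

import re

pattern = re.compile(r'\b(int|float|double|bool|char)(\([^)].*)')
for line in ['int x = int(3.5);', 'int y = static_cast<int>(3.5);']:
    match = pattern.search(line)
    if match:
        print('Use static_cast<%s>(...) instead' % match.group(1))
    else:
        print('ok:', line)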
Checks whether function type arguments are expected. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if the line at 'linenum' is inside something that expects arguments of function types.
def ExpectingFunctionArgs(clean_lines, linenum): line = clean_lines.elided[linenum] return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or (linenum >= 2 and (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', clean_lines.elided[linenum - 1]) or Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', clean_lines.elided[linenum - 2]) or Search(r'\bstd::m?function\s*\<\s*$', clean_lines.elided[linenum - 1]))))
170,192
Fill up the include_dict with new includes found from the file. Args: filename: the name of the header to read. include_dict: a dictionary in which the headers are inserted. io: The io factory to use to read the file. Provided for testability. Returns: True if a header was successfully added. False otherwise.
def UpdateIncludeState(filename, include_dict, io=codecs): headerfile = None try: headerfile = io.open(filename, 'r', 'utf8', 'replace') except IOError: return False linenum = 0 for line in headerfile: linenum += 1 clean_line = CleanseComments(line) match = _RE_PATTERN_INCLUDE.search(clean_line) if match: include = match.group(2) include_dict.setdefault(include, linenum) return True
170,194
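The include-scanning step in miniature, run over an in-memory file; setdefault keeps the first occurrence of each header, just like the function above. The pattern is a reduced form of _RE_PATTERN_INCLUDE.

import re

include_pattern = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"]')
include_dict = {}
source = ['#include <vector>', '#include "foo/bar.h"', 'int x;',
          '#include <vector>']
for linenum, line in enumerate(source, start=1):
    match = include_pattern.search(line)
    if match:
        include_dict.setdefault(match.group(2), linenum)
print(include_dict)  # {'vector': 1, 'foo/bar.h': 2}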
Check that make_pair's template arguments are deduced. G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are specified explicitly, and such use isn't intended in any case. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line) if match: error(filename, linenum, 'build/explicit_make_pair', 4, # 4 = high confidence 'For C++11-compatibility, omit template arguments from make_pair' ' OR use pair directly OR if appropriate, construct a pair directly')
170,195
Check if line contains a redundant "virtual" function-specifier. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckRedundantVirtual(filename, clean_lines, linenum, error): # Look for "virtual" on current line. line = clean_lines.elided[linenum] virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line) if not virtual: return # Ignore "virtual" keywords that are near access-specifiers. These # are only used in class base-specifier and do not apply to member # functions. if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or Match(r'^\s+(public|protected|private)\b', virtual.group(3))): return # Ignore the "virtual" keyword from virtual base classes. Usually # there is a colon on the same line in these cases (virtual base # classes are rare in google3 because multiple inheritance is rare). if Match(r'^.*[^:]:[^:].*$', line): return # Look for the next opening parenthesis. This is the start of the # parameter list (possibly on the next line shortly after virtual). # TODO(unknown): doesn't work if there are virtual functions with # decltype() or other things that use parentheses, but csearch suggests # that this is rare. end_col = -1 end_line = -1 start_col = len(virtual.group(2)) for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())): line = clean_lines.elided[start_line][start_col:] parameter_list = Match(r'^([^(]*)\(', line) if parameter_list: # Match parentheses to find the end of the parameter list (_, end_line, end_col) = CloseExpression( clean_lines, start_line, start_col + len(parameter_list.group(1))) break start_col = 0 if end_col < 0: return # Couldn't find end of parameter list, give up # Look for "override" or "final" after the parameter list # (possibly on the next few lines). for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())): line = clean_lines.elided[i][end_col:] match = Search(r'\b(override|final)\b', line) if match: error(filename, linenum, 'readability/inheritance', 4, ('"virtual" is redundant since function is ' 'already declared as "%s"' % match.group(1))) # Set end_col to check whole lines after we are done with the # first line. end_col = 0 if Search(r'[^\w]\s*$', line): break
170,196
Check if line contains a redundant "override" or "final" virt-specifier. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error): # Look for closing parenthesis nearby. We need one to confirm where # the declarator ends and where the virt-specifier starts to avoid # false positives. line = clean_lines.elided[linenum] declarator_end = line.rfind(')') if declarator_end >= 0: fragment = line[declarator_end:] else: if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0: fragment = line else: return # Check that at most one of "override" or "final" is present, not both if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment): error(filename, linenum, 'readability/inheritance', 4, ('"override" is redundant since function is ' 'already declared as "final"'))
170,197
Checks that the new block is directly in a namespace. Args: nesting_state: The _NestingState object that contains info about our state. is_forward_declaration: If the class is a forward declared class. Returns: Whether or not the new block is directly in a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration): if is_forward_declaration: return len(nesting_state.stack) >= 1 and ( isinstance(nesting_state.stack[-1], _NamespaceInfo)) return (len(nesting_state.stack) > 1 and nesting_state.stack[-1].check_namespace_indentation and isinstance(nesting_state.stack[-2], _NamespaceInfo))
170,198
Flag those C++14 features that we restrict. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def FlagCxx14Features(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) # Flag unapproved C++14 headers. if include and include.group(1) in ('scoped_allocator', 'shared_mutex'): error(filename, linenum, 'build/c++14', 5, ('<%s> is an unapproved C++14 header.') % include.group(1))
170,201
Loads the configuration files and processes the config overrides. Args: filename: The name of the file being processed by the linter. Returns: False if the current |filename| should not be processed further.
def ProcessConfigOverrides(filename): abs_filename = os.path.abspath(filename) cfg_filters = [] keep_looking = True while keep_looking: abs_path, base_name = os.path.split(abs_filename) if not base_name: break # Reached the root directory. cfg_file = os.path.join(abs_path, "CPPLINT.cfg") abs_filename = abs_path if not os.path.isfile(cfg_file): continue try: with open(cfg_file) as file_handle: for line in file_handle: line, _, _ = line.partition('#') # Remove comments. if not line.strip(): continue name, _, val = line.partition('=') name = name.strip() val = val.strip() if name == 'set noparent': keep_looking = False elif name == 'filter': cfg_filters.append(val) elif name == 'exclude_files': # When matching exclude_files pattern, use the base_name of # the current file name or the directory name we are processing. # For example, if we are checking for lint errors in /foo/bar/baz.cc # and we found the .cfg file at /foo/CPPLINT.cfg, then the config # file's "exclude_files" filter is meant to be checked against "bar" # and not "baz" nor "bar/baz.cc". if base_name: pattern = re.compile(val) if pattern.match(base_name): _cpplint_state.PrintInfo('Ignoring "%s": file excluded by ' '"%s". File path component "%s" matches pattern "%s"\n' % (filename, cfg_file, base_name, val)) return False elif name == 'linelength': global _line_length try: _line_length = int(val) except ValueError: _cpplint_state.PrintError('Line length must be numeric.') elif name == 'extensions': global _valid_extensions try: extensions = [ext.strip() for ext in val.split(',')] _valid_extensions = set(extensions) except ValueError: sys.stderr.write('Extensions should be a comma-separated list of values; ' 'for example: extensions=hpp,cpp\n' 'This could not be parsed: "%s"' % (val,)) elif name == 'headers': global _header_extensions try: extensions = [ext.strip() for ext in val.split(',')] _header_extensions = set(extensions) except ValueError: sys.stderr.write('Extensions should be a comma-separated list of values; ' 'for example: extensions=hpp,cpp\n' 'This could not be parsed: "%s"' % (val,)) elif name == 'root': global _root _root = val else: _cpplint_state.PrintError( 'Invalid configuration option (%s) in file %s\n' % (name, cfg_file)) except IOError: _cpplint_state.PrintError( "Skipping config file '%s': Can't open for reading\n" % cfg_file) keep_looking = False # Apply all the accumulated filters in reverse order (top-level directory # config options having the least priority). for cfg_filter in reversed(cfg_filters): _AddFilters(cfg_filter) return True
170,203
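The parsing loop above, run on an in-memory CPPLINT.cfg instead of a real file; the option names and comment syntax are the ones the function accepts.

cfg_text = '''
# comments are stripped
set noparent
filter=-build/include_order
linelength=100
'''
cfg_filters = []
for line in cfg_text.splitlines():
    line, _, _ = line.partition('#')
    if not line.strip():
        continue
    name, _, val = line.partition('=')
    name, val = name.strip(), val.strip()
    if name == 'set noparent':
        print('stop looking in parent directories')
    elif name == 'filter':
        cfg_filters.append(val)
    elif name == 'linelength':
        print('line length ->', int(val))
print(cfg_filters)  # ['-build/include_order']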
Does google-lint on a single file. Args: filename: The name of the file to parse. vlevel: The level of errors to report. Every error of confidence >= verbose_level will be reported. 0 is a good default. extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error
def ProcessFile(filename, vlevel, extra_check_functions=None): _SetVerboseLevel(vlevel) _BackupFilters() if not ProcessConfigOverrides(filename): _RestoreFilters() return lf_lines = [] crlf_lines = [] try: # Support the UNIX convention of using "-" for stdin. Note that # we are not opening the file with universal newline support # (which codecs doesn't support anyway), so the resulting lines do # contain trailing '\r' characters if we are reading a file that # has CRLF endings. # If after the split a trailing '\r' is present, it is removed # below. if filename == '-': lines = codecs.StreamReaderWriter(sys.stdin, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace').read().split('\n') else: lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n') # Remove trailing '\r'. # The -1 accounts for the extra trailing blank line we get from split() for linenum in range(len(lines) - 1): if lines[linenum].endswith('\r'): lines[linenum] = lines[linenum].rstrip('\r') crlf_lines.append(linenum + 1) else: lf_lines.append(linenum + 1) except IOError: _cpplint_state.PrintError( "Skipping input '%s': Can't open for reading\n" % filename) _RestoreFilters() return # Note, if no dot is found, this will give the entire filename as the ext. file_extension = filename[filename.rfind('.') + 1:] # When reading from stdin, the extension is unknown, so no cpplint tests # should rely on the extension. if filename != '-' and file_extension not in GetAllExtensions(): # bazel 0.5.1 and above uses five distinct generated files that give a warning; # we suppress the warning for these files bazel_gen_files = set([ "external/local_config_cc/libtool", "external/local_config_cc/make_hashed_objlist.py", "external/local_config_cc/wrapped_ar", "external/local_config_cc/wrapped_clang", "external/local_config_cc/xcrunwrapper.sh", ]) if filename not in bazel_gen_files: _cpplint_state.PrintError('Ignoring %s; not a valid file name ' '(%s)\n' % (filename, ', '.join(GetAllExtensions()))) else: ProcessFileData(filename, file_extension, lines, Error, extra_check_functions) # If end-of-line sequences are a mix of LF and CR-LF, issue # warnings on the lines with CR. # # Don't issue any warnings if all lines are uniformly LF or CR-LF, # since critique can handle these just fine, and the style guide # doesn't dictate a particular end of line sequence. # # We can't depend on os.linesep to determine what the desired # end-of-line sequence should be, since that will return the # server-side end-of-line sequence. if lf_lines and crlf_lines: # Warn on every line with CR. An alternative approach might be to # check whether the file is mostly CRLF or just LF, and warn on the # minority; we bias toward LF here since most tools prefer LF. for linenum in crlf_lines: Error(filename, linenum, 'whitespace/newline', 1, 'Unexpected \\r (^M) found; better to use only \\n') _RestoreFilters()
170,204
Parses the command line arguments. This may set the output format and verbosity level as side-effects. Args: args: The command line arguments. Returns: The list of filenames to lint.
def ParseArguments(args): try: (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=', 'counting=', 'filter=', 'root=', 'repository=', 'linelength=', 'extensions=', 'exclude=', 'headers=', 'quiet', 'recursive']) except getopt.GetoptError: PrintUsage('Invalid arguments.') verbosity = _VerboseLevel() output_format = _OutputFormat() filters = '' counting_style = '' recursive = False for (opt, val) in opts: if opt == '--help': PrintUsage(None) elif opt == '--output': if val not in ('emacs', 'vs7', 'eclipse', 'junit'): PrintUsage('The only allowed output formats are emacs, vs7, eclipse ' 'and junit.') output_format = val elif opt == '--verbose': verbosity = int(val) elif opt == '--filter': filters = val if not filters: PrintCategories() elif opt == '--counting': if val not in ('total', 'toplevel', 'detailed'): PrintUsage('Valid counting options are total, toplevel, and detailed') counting_style = val elif opt == '--root': global _root _root = val elif opt == '--repository': global _repository _repository = val elif opt == '--linelength': global _line_length try: _line_length = int(val) except ValueError: PrintUsage('Line length must be numeric.') elif opt == '--exclude': global _excludes if not _excludes: _excludes = set() _excludes.update(glob.glob(val)) elif opt == '--extensions': global _valid_extensions try: _valid_extensions = set(val.split(',')) except ValueError: PrintUsage('Extensions must be a comma-separated list.') elif opt == '--headers': global _header_extensions try: _header_extensions = set(val.split(',')) except ValueError: PrintUsage('Extensions must be a comma-separated list.') elif opt == '--recursive': recursive = True elif opt == '--quiet': global _quiet _quiet = True if not filenames: PrintUsage('No files were specified.') if recursive: filenames = _ExpandDirectories(filenames) if _excludes: filenames = _FilterExcludedFiles(filenames) _SetOutputFormat(output_format) _SetVerboseLevel(verbosity) _SetFilters(filters) _SetCountingStyle(counting_style) return filenames
170,206
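The getopt pattern in isolation, with a reduced option list; long options declared with a trailing '=' require a value, and positional arguments come back separately.

import getopt

opts, filenames = getopt.getopt(
    ['--verbose=3', '--linelength=100', 'foo.cc'],
    '', ['verbose=', 'linelength='])
print(opts)       # [('--verbose', '3'), ('--linelength', '100')]
print(filenames)  # ['foo.cc']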
Searches a list of filenames and replaces directories in the list with all files descending from those directories. Files with extensions not in the valid extensions list are excluded. Args: filenames: A list of files or directories Returns: A list of all files that are members of filenames or descended from a directory in filenames
def _ExpandDirectories(filenames): expanded = set() for filename in filenames: if not os.path.isdir(filename): expanded.add(filename) continue for root, _, files in os.walk(filename): for loopfile in files: fullname = os.path.join(root, loopfile) if fullname.startswith('.' + os.path.sep): fullname = fullname[len('.' + os.path.sep):] expanded.add(fullname) filtered = [] for filename in expanded: if os.path.splitext(filename)[1][1:] in GetAllExtensions(): filtered.append(filename) return filtered
170,207
Check if a header has already been included. Args: header: header to check. Returns: Line number of previous occurrence, or -1 if the header has not been seen before.
def FindHeader(self, header): for section_list in self.include_list: for f in section_list: if f[0] == header: return f[1] return -1
170,211
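A self-contained demo of the data layout this method walks: include_list is a list of sections, each a list of (header, linenum) pairs.

include_list = [[('stdio.h', 1), ('vector', 2)], [('foo/bar.h', 10)]]

def find_header(include_list, header):
    for section_list in include_list:
        for f in section_list:
            if f[0] == header:
                return f[1]
    return -1

print(find_header(include_list, 'vector'))  # 2
print(find_header(include_list, 'string'))  # -1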
Reset section checking for preprocessor directive. Args: directive: preprocessor directive (e.g. "if", "else").
def ResetSection(self, directive): # The name of the current section. self._section = self._INITIAL_SECTION # The path of last found header. self._last_header = '' # Update list of includes. Note that we never pop from the # include list. if directive in ('if', 'ifdef', 'ifndef'): self.include_list.append([]) elif directive in ('else', 'elif'): self.include_list[-1] = []
170,212
Check if a header is in alphabetical order with the previous header. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. header_path: Canonicalized header to be checked. Returns: Returns true if the header is in alphabetical order.
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): # If previous section is different from current section, _last_header will # be reset to empty string, so it's always less than current header. # # If previous line was a blank line, assume that the headers are # intentionally sorted the way they are. if (self._last_header > header_path and Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])): return False return True
170,213
Returns a non-empty error message if the next header is out of order. This function also updates the internal state to be ready to check the next include. Args: header_type: One of the _XXX_HEADER constants defined above. Returns: The empty string if the header is in the right order, or an error message describing what's wrong.
def CheckNextIncludeOrder(self, header_type): error_message = ('Found %s after %s' % (self._TYPE_NAMES[header_type], self._SECTION_NAMES[self._section])) last_section = self._section if header_type == _C_SYS_HEADER: if self._section <= self._C_SECTION: self._section = self._C_SECTION else: self._last_header = '' return error_message elif header_type == _CPP_SYS_HEADER: if self._section <= self._CPP_SECTION: self._section = self._CPP_SECTION else: self._last_header = '' return error_message elif header_type == _LIKELY_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION else: self._section = self._OTHER_H_SECTION elif header_type == _POSSIBLE_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION else: # This will always be the fallback because we're not sure # enough that the header is associated with this file. self._section = self._OTHER_H_SECTION else: assert header_type == _OTHER_HEADER self._section = self._OTHER_H_SECTION if last_section != self._section: self._last_header = '' return ''
170,214
Start analyzing function body. Args: function_name: The name of the function being tracked.
def Begin(self, function_name): self.in_a_function = True self.lines_in_function = 0 self.current_function = function_name
170,224
Collapses strings and chars on a line to simple "" or '' blocks. We nix strings first so we're not fooled by text like '"http://"' Args: elided: The line being processed. Returns: The line with collapsed strings.
def _CollapseStrings(elided): if _RE_PATTERN_INCLUDE.match(elided): return elided # Remove escaped characters first to make quote/single quote collapsing # basic. Things that look like escaped characters shouldn't occur # outside of strings and chars. elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) # Replace quoted strings and digit separators. Both single quotes # and double quotes are processed in the same loop, otherwise # nested quotes wouldn't work. collapsed = '' while True: # Find the first quote character match = Match(r'^([^\'"]*)([\'"])(.*)$', elided) if not match: collapsed += elided break head, quote, tail = match.groups() if quote == '"': # Collapse double quoted strings second_quote = tail.find('"') if second_quote >= 0: collapsed += head + '""' elided = tail[second_quote + 1:] else: # Unmatched double quote, don't bother processing the rest # of the line since this is probably a multiline string. collapsed += elided break else: # Found single quote, check nearby text to eliminate digit separators. # # There is no special handling for floating point here, because # the integer/fractional/exponent parts would all be parsed # correctly as long as there are digits on both sides of the # separator. So we are fine as long as we don't see something # like "0.'3" (gcc 4.9.0 will not allow this literal). if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head): match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail) collapsed += head + match_literal.group(1).replace("'", '') elided = match_literal.group(2) else: second_quote = tail.find('\'') if second_quote >= 0: collapsed += head + "''" elided = tail[second_quote + 1:] else: # Unmatched single quote collapsed += elided break return collapsed
170,228
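A much-simplified sketch of the same idea for single-line literals: drop escaped characters first, then blank out quoted contents. The real function additionally handles multiline strings and C++14 digit separators.

import re

def collapse_strings(line):
    line = re.sub(r'\\.', '', line)        # drop escaped characters first
    line = re.sub(r'"[^"]*"', '""', line)  # collapse double-quoted strings
    line = re.sub(r"'[^']*'", "''", line)  # collapse character literals
    return line

print(collapse_strings('printf("hi \\"there\\"", \'x\');'))  # printf("", '');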
Check if current position is inside template argument list. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: position just after the suspected template argument. Returns: True if (linenum, pos) is inside template arguments.
def InTemplateArgumentList(self, clean_lines, linenum, pos): while linenum < clean_lines.NumLines(): # Find the earliest character that might indicate a template argument line = clean_lines.elided[linenum] match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:]) if not match: linenum += 1 pos = 0 continue token = match.group(1) pos += len(match.group(0)) # These things do not look like template argument list: # class Suspect { # class Suspect x; } if token in ('{', '}', ';'): return False # These things look like template argument list: # template <class Suspect> # template <class Suspect = default_value> # template <class Suspect[]> # template <class Suspect...> if token in ('>', '=', '[', ']', '.'): return True # Check if token is an unmatched '<'. # If not, move on to the next character. if token != '<': pos += 1 if pos >= len(line): linenum += 1 pos = 0 continue # We can't be sure if we just find a single '<', and need to # find the matching '>'. (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1) if end_pos < 0: # Not sure if template argument list or syntax error in file return False linenum = end_line pos = end_pos return False
170,235
Update nesting state with current line. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def Update(self, filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Remember top of the previous nesting stack. # # The stack is always pushed/popped and not modified in place, so # we can just do a shallow copy instead of copy.deepcopy. Using # deepcopy would slow down cpplint by ~28%. if self.stack: self.previous_stack_top = self.stack[-1] else: self.previous_stack_top = None # Update pp_stack self.UpdatePreprocessor(line) # Count parentheses. This is to avoid adding struct arguments to # the nesting stack. if self.stack: inner_block = self.stack[-1] depth_change = line.count('(') - line.count(')') inner_block.open_parentheses += depth_change # Also check if we are starting or ending an inline assembly block. if inner_block.inline_asm in (_NO_ASM, _END_ASM): if (depth_change != 0 and inner_block.open_parentheses == 1 and _MATCH_ASM.match(line)): # Enter assembly block inner_block.inline_asm = _INSIDE_ASM else: # Not entering assembly block. If previous line was _END_ASM, # we will now shift to _NO_ASM state. inner_block.inline_asm = _NO_ASM elif (inner_block.inline_asm == _INSIDE_ASM and inner_block.open_parentheses == 0): # Exit assembly block inner_block.inline_asm = _END_ASM # Consume namespace declaration at the beginning of the line. Do # this in a loop so that we catch same line declarations like this: # namespace proto2 { namespace bridge { class MessageSet; } } while True: # Match start of namespace. The "\b\s*" below catches namespace # declarations even if it weren't followed by a whitespace, this # is so that we don't confuse our namespace checker. The # missing spaces will be flagged by CheckSpacing. namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) if not namespace_decl_match: break new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) self.stack.append(new_namespace) line = namespace_decl_match.group(2) if line.find('{') != -1: new_namespace.seen_open_brace = True line = line[line.find('{') + 1:] # Look for a class declaration in whatever is left of the line # after parsing namespaces. The regexp accounts for decorated classes # such as in: # class LOCKABLE API Object { # }; class_decl_match = Match( r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?' r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))' r'(.*)$', line) if (class_decl_match and (not self.stack or self.stack[-1].open_parentheses == 0)): # We do not want to accept classes that are actually template arguments: # template <class Ignore1, # class Ignore2 = Default<Args>, # template <Args> class Ignore3> # void Function() {}; # # To avoid template argument cases, we scan forward and look for # an unmatched '>'. If we see one, assume we are inside a # template argument list. end_declaration = len(class_decl_match.group(1)) if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration): self.stack.append(_ClassInfo( class_decl_match.group(3), class_decl_match.group(2), clean_lines, linenum)) line = class_decl_match.group(4) # If we have not yet seen the opening brace for the innermost block, # run checks here. if not self.SeenOpenBrace(): self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) # Update access control if we are inside a class/struct if self.stack and isinstance(self.stack[-1], _ClassInfo): classinfo = self.stack[-1] access_match = Match( r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?' 
r':(?:[^:]|$)', line) if access_match: classinfo.access = access_match.group(2) # Check that access keywords are indented +1 space. Skip this # check if the keywords are not preceded by whitespace. indent = access_match.group(1) if (len(indent) != classinfo.class_indent + 1 and Match(r'^\s*$', indent)): if classinfo.is_struct: parent = 'struct ' + classinfo.name else: parent = 'class ' + classinfo.name slots = '' if access_match.group(3): slots = access_match.group(3) error(filename, linenum, 'whitespace/indent', 3, '%s%s: should be indented +1 space inside %s' % ( access_match.group(2), slots, parent)) # Consume braces or semicolons from what's left of the line while True: # Match first brace, semicolon, or closing parenthesis. matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line) if not matched: break token = matched.group(1) if token == '{': # If namespace or class hasn't seen an opening brace yet, mark # namespace/class head as complete. Push a new block onto the # stack otherwise. if not self.SeenOpenBrace(): self.stack[-1].seen_open_brace = True elif Match(r'^extern\s*"[^"]*"\s*\{', line): self.stack.append(_ExternCInfo(linenum)) else: self.stack.append(_BlockInfo(linenum, True)) if _MATCH_ASM.match(line): self.stack[-1].inline_asm = _BLOCK_ASM elif token == ';' or token == ')': # If we haven't seen an opening brace yet, but we already saw # a semicolon, this is probably a forward declaration. Pop # the stack for these. # # Similarly, if we haven't seen an opening brace yet, but we # already saw a closing parenthesis, then these are probably # function arguments with extra "class" or "struct" keywords. # Also pop the stack for these. if not self.SeenOpenBrace(): self.stack.pop() else: # token == '}' # Perform end of block checks and pop the stack. if self.stack: self.stack[-1].CheckEnd(filename, clean_lines, linenum, error) self.stack.pop() line = matched.group(2)
170,237
Checks that all classes and namespaces have been completely parsed. Call this when all lines in a file have been processed. Args: filename: The name of the current file. error: The function to call with any errors found.
def CheckCompletedBlocks(self, filename, error): # Note: This test can result in false positives if #ifdef constructs # get in the way of brace matching. See the testBuildClass test in # cpplint_unittest.py for an example of this. for obj in self.stack: if isinstance(obj, _ClassInfo): error(filename, obj.starting_linenum, 'build/class', 5, 'Failed to find complete declaration of class %s' % obj.name) elif isinstance(obj, _NamespaceInfo): error(filename, obj.starting_linenum, 'build/namespaces', 5, 'Failed to find complete declaration of namespace %s' % obj.name)
170,239
Make sympy symbols q0, q1, ... Args: n (int), m (int, optional): If both n and m are specified, returns [qn, q(n+1), ..., q(m-1)]. If only n is specified, returns [q0, q1, ..., q(n-1)]. Return: tuple(Symbol): Tuple of sympy symbols.
def make_qs(n, m=None): try: import sympy except ImportError: raise ImportError("This function requires sympy. Please install it.") if m is None: syms = sympy.symbols(" ".join(f"q{i}" for i in range(n))) if isinstance(syms, tuple): return syms else: return (syms,) syms = sympy.symbols(" ".join(f"q{i}" for i in range(n, m))) if isinstance(syms, tuple): return syms else: return (syms,)
170,324
Convert Sympy's expr to QUBO. Args: expr: Sympy's quadratic expression with variable `q0`, `q1`, ... Returns: [[float]]: Returns QUBO matrix.
def qn_to_qubo(expr): try: import sympy except ImportError: raise ImportError("This function requires sympy. Please install it.") assert type(expr) == sympy.Add to_i = lambda s: int(str(s)[1:]) max_i = max(map(to_i, expr.free_symbols)) + 1 qubo = [[0.] * max_i for _ in range(max_i)] for arg in expr.args: syms = arg.free_symbols assert len(syms) <= 2 if len(syms) == 2: assert type(arg) == sympy.Mul i, j = list(map(to_i, syms)) if i > j: i, j = j, i if i == j: if len(arg.args) == 2: qubo[i][i] = float(arg.args[0]) elif len(arg.args) == 1: qubo[i][i] = 1.0 else: raise ValueError(f"Too many args! arg.args = {arg.args}") continue if len(arg.args) == 3: qubo[i][j] = float(arg.args[0]) elif len(arg.args) == 2: qubo[i][j] if len(syms) == 1: if len(arg.args) == 2: assert type(arg) == sympy.Mul i = to_i(next(iter(syms))) qubo[i][i] = float(arg.args[0]) elif len(arg.args) == 1: qubo[i][i] = 1.0 else: raise ValueError(f"Too many args! arg.args = {arg.args}") return qubo
170,326
Register new macro to Circuit. Args: name (str): The name of macro. func (callable): The function to be called. allow_overwrite (bool, optional): If True, allow to overwrite the existing macro. Otherwise, raise the ValueError. Raises: ValueError: The name is duplicated with existing macro, gate or method. When `allow_overwrite=True`, this error is not raised.
def register_macro(name: str, func: Callable, allow_overwrite: bool = False) -> None: if hasattr(Circuit, name): if allow_overwrite: warnings.warn(f"Circuit has attribute `{name}`.") else: raise ValueError(f"Circuit has attribute `{name}`.") if name.startswith("run_with_"): if allow_overwrite: warnings.warn(f"Gate name `{name}` may conflict with run of backend.") else: raise ValueError(f"Gate name `{name}` shall not start with 'run_with_'.") if not allow_overwrite: if name in GATE_SET: raise ValueError(f"Gate '{name}' is already exists in gate set.") if name in GLOBAL_MACROS: raise ValueError(f"Macro '{name}' is already exists.") GLOBAL_MACROS[name] = func
170,455
Register new gate to gate set. Args: name (str): The name of gate. gateclass (type): The type object of gate. allow_overwrite (bool, optional): If True, allow to overwrite the existing gate. Otherwise, raise the ValueError. Raises: ValueError: The name is duplicated with existing gate. When `allow_overwrite=True`, this error is not raised.
def register_gate(name, gateclass, allow_overwrite=False): if hasattr(Circuit, name): if allow_overwrite: warnings.warn(f"Circuit has attribute `{name}`.") else: raise ValueError(f"Circuit has attribute `{name}`.") if name.startswith("run_with_"): if allow_overwrite: warnings.warn(f"Gate name `{name}` may conflict with run of backend.") else: raise ValueError(f"Gate name `{name}` shall not start with 'run_with_'.") if not allow_overwrite: if name in GATE_SET: raise ValueError(f"Gate '{name}' is already exists in gate set.") if name in GLOBAL_MACROS: raise ValueError(f"Macro '{name}' is already exists.") GATE_SET[name] = gateclass
170,456
Register new backend. Args: name (str): The name of backend. gateclass (type): The type object of backend allow_overwrite (bool, optional): If True, allow to overwrite the existing backend. Otherwise, raise the ValueError. Raises: ValueError: The name is duplicated with existing backend. When `allow_overwrite=True`, this error is not raised.
def register_backend(name, backend, allow_overwrite=False): if hasattr(Circuit, "run_with_" + name): if allow_overwrite: warnings.warn(f"Circuit has attribute `run_with_{name}`.") else: raise ValueError(f"Circuit has attribute `run_with_{name}`.") if not allow_overwrite: if name in BACKENDS: raise ValueError(f"Backend '{name}' is already registered as backend.") BACKENDS[name] = backend
170,457
Make Pauli matrix from an character. Args: ch (str): "X" or "Y" or "Z" or "I". n (int, optional): Make Pauli matrix as n-th qubits. Returns: If ch is "X" => X, "Y" => Y, "Z" => Z, "I" => I Raises: ValueError: When ch is not "X", "Y", "Z" nor "I".
def pauli_from_char(ch, n=0): ch = ch.upper() if ch == "I": return I if ch == "X": return X(n) if ch == "Y": return Y(n) if ch == "Z": return Z(n) raise ValueError("ch shall be X, Y, Z or I")
170,459
Returns [expr1, expr2] = expr1 * expr2 - expr2 * expr1. Args: expr1 (Expr, Term or Pauli operator): Pauli's expression. expr2 (Expr, Term or Pauli operator): Pauli's expression. Returns: Expr: expr1 * expr2 - expr2 * expr1.
def commutator(expr1, expr2): expr1 = expr1.to_expr().simplify() expr2 = expr2.to_expr().simplify() return (expr1 * expr2 - expr2 * expr1).simplify()
170,460
Test whether expr1 and expr2 are commutable. Args: expr1 (Expr, Term or Pauli operator): Pauli's expression. expr2 (Expr, Term or Pauli operator): Pauli's expression. eps (float, optional): Machine epsilon. If |[expr1, expr2]| < eps, consider it is commutable. Returns: bool: if expr1 and expr2 are commutable, returns True, otherwise False.
def is_commutable(expr1, expr2, eps=0.00000001): return sum((x * x.conjugate()).real for x in commutator(expr1, expr2).coeffs()) < eps
170,461
Make Pauli's Term from chars which is written by "X", "Y", "Z" or "I". e.g. "XZIY" => X(0) * Z(1) * Y(3) Args: chars (str): Written in "X", "Y", "Z" or "I". Returns: Term: A `Term` object. Raises: ValueError: When chars conteins the character which is "X", "Y", "Z" nor "I".
def from_chars(chars): paulis = [pauli_from_char(c, n) for n, c in enumerate(chars) if c != "I"] if not paulis: return 1.0 * I if len(paulis) == 1: return 1.0 * paulis[0] return reduce(lambda a, b: a * b, paulis)
170,468
Sets the stroke properties. Args: width (int): stroke width color (str): stroke color
def set_stroke(self, width=1, color='black'): self.attributes['stroke'] = color self.attributes['stroke-width'] = str(width)
170,514
setup a method as an event, adding also javascript code to generate Args: js_code (str): javascript code to generate the event client-side. js_code is added to the widget html as widget.attributes['onclick'] = js_code%{'emitter_identifier':widget.identifier, 'event_name':'onclick'}
def decorate_event_js(js_code): def add_annotation(method): setattr(method, "__is_event", True ) setattr(method, "_js_code", js_code ) return method return add_annotation
170,611
Private decorator for use in the editor. Allows the Editor to create listener methods. Args: params (str): The list of parameters for the listener method (es. "(self, new_value)")
def decorate_set_on_listener(prototype): # noinspection PyDictCreation,PyProtectedMember def add_annotation(method): method._event_info = {} method._event_info['name'] = method.__name__ method._event_info['prototype'] = prototype return method return add_annotation
170,612
It is used to automatically represent the object to HTML format packs all the attributes, children and so on. Args: changed_widgets (dict): A dictionary containing a collection of tags that have to be updated. The tag that have to be updated is the key, and the value is its textual repr.
def repr(self, changed_widgets=None): if changed_widgets is None: changed_widgets = {} local_changed_widgets = {} _innerHTML = self.innerHTML(local_changed_widgets) if self._ischanged() or ( len(local_changed_widgets) > 0 ): self._backup_repr = ''.join(('<', self.type, ' ', self._repr_attributes, '>', _innerHTML, '</', self.type, '>')) #faster but unsupported before python3.6 #self._backup_repr = f'<{self.type} {self._repr_attributes}>{_innerHTML}</{self.type}>' if self._ischanged(): # if self changed, no matter about the children because will be updated the entire parent # and so local_changed_widgets is not merged changed_widgets[self] = self._backup_repr self._set_updated() else: changed_widgets.update(local_changed_widgets) return self._backup_repr
170,625
Adds a child to the Tag To retrieve the child call get_child or access to the Tag.children[key] dictionary. Args: key (str): Unique child's identifier, or iterable of keys value (Tag, str): can be a Tag, an iterable of Tag or a str. In case of iterable of Tag is a dict, each item's key is set as 'key' param
def add_child(self, key, value): if type(value) in (list, tuple, dict): if type(value)==dict: for k in value.keys(): self.add_child(k, value[k]) return i = 0 for child in value: self.add_child(key[i], child) i = i + 1 return if hasattr(value, 'attributes'): value.attributes['data-parent-widget'] = self.identifier value._parent = self if key in self.children: self._render_children_list.remove(key) self._render_children_list.append(key) self.children[key] = value
170,631
Removes a child instance from the Tag's children. Args: child (Tag): The child to be removed.
def remove_child(self, child): if child in self.children.values() and hasattr(child, 'identifier'): for k in self.children.keys(): if hasattr(self.children[k], 'identifier'): if self.children[k].identifier == child.identifier: if k in self._render_children_list: self._render_children_list.remove(k) self.children.pop(k) # when the child is removed we stop the iteration # this implies that a child replication should not be allowed break
170,633
Allows to set style properties for the widget. Args: style (str or dict): The style property dictionary or json string.
def set_style(self, style): if style is not None: try: self.style.update(style) except ValueError: for s in style.split(';'): k, v = s.split(':', 1) self.style[k.strip()] = v.strip()
170,635
Set the widget size. Args: width (int or str): An optional width for the widget (es. width=10 or width='10px' or width='10%'). height (int or str): An optional height for the widget (es. height=10 or height='10px' or height='10%').
def set_size(self, width, height): if width is not None: try: width = to_pix(int(width)) except ValueError: # now we know w has 'px or % in it' pass self.style['width'] = width if height is not None: try: height = to_pix(int(height)) except ValueError: # now we know w has 'px or % in it' pass self.style['height'] = height
170,636
Represents the widget as HTML format, packs all the attributes, children and so on. Args: client (App): Client instance. changed_widgets (dict): A dictionary containing a collection of widgets that have to be updated. The Widget that have to be updated is the key, and the value is its textual repr.
def repr(self, changed_widgets=None): if changed_widgets is None: changed_widgets={} return super(Widget, self).repr(changed_widgets)
170,637
Called when user types and releases a key. The widget should be able to receive the focus in order to emit the event. Assign a 'tabindex' attribute to make it focusable. Args: key (str): the character value keycode (str): the numeric char code
def onkeyup(self, key, keycode, ctrl, shift, alt): return (key, keycode, ctrl, shift, alt)
170,638
Called when user types and releases a key. The widget should be able to receive the focus in order to emit the event. Assign a 'tabindex' attribute to make it focusable. Args: key (str): the character value keycode (str): the numeric char code
def onkeydown(self, key, keycode, ctrl, shift, alt): return (key, keycode, ctrl, shift, alt)
170,639
It is used to automatically represent the object to HTML format packs all the attributes, children and so on. Args: changed_widgets (dict): A dictionary containing a collection of tags that have to be updated. The tag that have to be updated is the key, and the value is its textual repr.
def repr(self, changed_widgets=None): if changed_widgets is None: changed_widgets={} local_changed_widgets = {} self._set_updated() return ''.join(('<', self.type, '>\n', self.innerHTML(local_changed_widgets), '\n</', self.type, '>'))
170,641
Allows to define an icon for the App Args: filename (str): the resource file name (ie. "/res:myicon.png") rel (str): leave it unchanged (standard "icon")
def set_icon_file(self, filename, rel="icon"): mimetype, encoding = mimetypes.guess_type(filename) self.add_child("favicon", '<link rel="%s" href="%s" type="%s" />'%(rel, filename, mimetype))
170,643
Allows to define an icon for the App Args: base64_data (str): base64 encoded image data (ie. "data:image/x-icon;base64,AAABAAEAEBA....") mimetype (str): mimetype of the image ("image/png" or "image/x-icon"...) rel (str): leave it unchanged (standard "icon")
def set_icon_data(self, base64_data, mimetype="image/png", rel="icon"): self.add_child("favicon", '<link rel="%s" href="%s" type="%s" />'%(rel, base64_data, mimetype))
170,644