rulebook_cppcheck.checkers
from rulebook_cppcheck.checkers.abbreviation_as_word import AbbreviationAsWordChecker
from rulebook_cppcheck.checkers.assignment_wrap import AssignmentWrapChecker
from rulebook_cppcheck.checkers.block_comment_spaces import BlockCommentSpacesChecker
from rulebook_cppcheck.checkers.block_comment_trim import BlockCommentTrimChecker
from rulebook_cppcheck.checkers.block_tag_indentation import BlockTagIndentationChecker
from rulebook_cppcheck.checkers.block_tag_punctuation import BlockTagPunctuationChecker
from rulebook_cppcheck.checkers.case_separator import CaseSeparatorChecker
from rulebook_cppcheck.checkers.chain_call_wrap import ChainCallWrapChecker
from rulebook_cppcheck.checkers.class_name import ClassNameChecker
from rulebook_cppcheck.checkers.comment_spaces import CommentSpacesChecker
from rulebook_cppcheck.checkers.comment_trim import CommentTrimChecker
from rulebook_cppcheck.checkers.complicated_assignment import ComplicatedAssignmentChecker
from rulebook_cppcheck.checkers.duplicate_blank_line import DuplicateBlankLineChecker
from rulebook_cppcheck.checkers.duplicate_blank_line_in_block_comment import \
    DuplicateBlankLineInBlockCommentChecker
from rulebook_cppcheck.checkers.duplicate_blank_line_in_comment import \
    DuplicateBlankLineInCommentChecker
from rulebook_cppcheck.checkers.duplicate_space import DuplicateSpaceChecker
from rulebook_cppcheck.checkers.file_name import FileNameChecker
from rulebook_cppcheck.checkers.file_size import FileSizeChecker
from rulebook_cppcheck.checkers.generic_name import GenericNameChecker
from rulebook_cppcheck.checkers.identifier_name import IdentifierNameChecker
from rulebook_cppcheck.checkers.illegal_catch import IllegalCatchChecker
from rulebook_cppcheck.checkers.illegal_throw import IllegalThrowChecker
from rulebook_cppcheck.checkers.illegal_variable_name import IllegalVariableNameChecker
from rulebook_cppcheck.checkers.import_order import ImportOrderChecker
from rulebook_cppcheck.checkers.indent_style import IndentStyleChecker
from rulebook_cppcheck.checkers.inner_class_position import InnerClassPositionChecker
from rulebook_cppcheck.checkers.line_length import LineLengthChecker
from rulebook_cppcheck.checkers.lonely_case import LonelyCaseChecker
from rulebook_cppcheck.checkers.lonely_if import LonelyIfChecker
from rulebook_cppcheck.checkers.lowercase_f import LowercaseFChecker
from rulebook_cppcheck.checkers.lowercase_hexadecimal import LowercaseHexadecimalChecker
from rulebook_cppcheck.checkers.meaningless_word import MeaninglessWordChecker
from rulebook_cppcheck.checkers.member_order import MemberOrderChecker
from rulebook_cppcheck.checkers.member_separator import MemberSeparatorChecker
from rulebook_cppcheck.checkers.operator_wrap import OperatorWrapChecker
from rulebook_cppcheck.checkers.package_name import PackageNameChecker
from rulebook_cppcheck.checkers.parameter_wrap import ParameterWrapChecker
from rulebook_cppcheck.checkers.parentheses_clip import ParenthesesClipChecker
from rulebook_cppcheck.checkers.parentheses_trim import ParenthesesTrimChecker
from rulebook_cppcheck.checkers.redundant_default import RedundantDefaultChecker
from rulebook_cppcheck.checkers.redundant_else import RedundantElseChecker
from rulebook_cppcheck.checkers.redundant_if import RedundantIfChecker
from rulebook_cppcheck.checkers.todo_comment import TodoCommentChecker
from rulebook_cppcheck.checkers.trailing_newline import TrailingNewlineChecker
from rulebook_cppcheck.checkers.unnecessary_return import UnnecessaryReturnChecker
from rulebook_cppcheck.checkers.unnecessary_trailing_whitespace import \
    UnnecessaryTrailingWhitespaceChecker
from rulebook_cppcheck.checkers.uppercase_l import UppercaseLChecker

# Public API of the checkers package, kept in alphabetical order.
# BUG FIX: the list previously named 'FinalNewlineChecker', which is never
# imported or defined anywhere in this package (the actual import is
# TrailingNewlineChecker), so `from rulebook_cppcheck.checkers import *`
# raised AttributeError.  The entry is replaced with 'TrailingNewlineChecker'.
__all__: list[str] = [
    'AbbreviationAsWordChecker',
    'AssignmentWrapChecker',
    'BlockCommentSpacesChecker',
    'BlockCommentTrimChecker',
    'BlockTagIndentationChecker',
    'BlockTagPunctuationChecker',
    'CaseSeparatorChecker',
    'ChainCallWrapChecker',
    'ClassNameChecker',
    'CommentSpacesChecker',
    'CommentTrimChecker',
    'ComplicatedAssignmentChecker',
    'DuplicateBlankLineChecker',
    'DuplicateBlankLineInBlockCommentChecker',
    'DuplicateBlankLineInCommentChecker',
    'DuplicateSpaceChecker',
    'FileNameChecker',
    'FileSizeChecker',
    'GenericNameChecker',
    'IdentifierNameChecker',
    'IllegalCatchChecker',
    'IllegalThrowChecker',
    'IllegalVariableNameChecker',
    'ImportOrderChecker',
    'IndentStyleChecker',
    'InnerClassPositionChecker',
    'LineLengthChecker',
    'LonelyCaseChecker',
    'LonelyIfChecker',
    'LowercaseFChecker',
    'LowercaseHexadecimalChecker',
    'MeaninglessWordChecker',
    'MemberOrderChecker',
    'MemberSeparatorChecker',
    'OperatorWrapChecker',
    'PackageNameChecker',
    'ParameterWrapChecker',
    'ParenthesesClipChecker',
    'ParenthesesTrimChecker',
    'RedundantDefaultChecker',
    'RedundantElseChecker',
    'RedundantIfChecker',
    'TodoCommentChecker',
    'TrailingNewlineChecker',
    'UnnecessaryReturnChecker',
    'UnnecessaryTrailingWhitespaceChecker',
    'UppercaseLChecker',
]
class AbbreviationAsWordChecker(RulebookChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#abbreviation-as-word"""
    ID: str = 'abbreviation-as-word'
    _MSG: str = 'abbreviation.as.word'

    # Three or more consecutive capitals followed by a new word or end-of-name.
    _ABBREVIATION_REGEX: Pattern = re(r'[A-Z]{3,}(?=[A-Z][a-z]|$)')

    def get_scopeset(self) -> set[str]:
        # only user-defined type scopes carry a checkable name
        return {'Class', 'Struct', 'Union', 'Enum'}

    def visit_scope(self, scope: Scope) -> None:
        """Report type names spelling an abbreviation in full capitals."""
        name: str | None = scope.className
        if name is None:
            return
        if not self._ABBREVIATION_REGEX.findall(name):
            return
        # suggest the form with only the first letter capitalized
        suggestion: str = self._ABBREVIATION_REGEX.sub(
            lambda m: m.group(0)[0] + m.group(0)[1:].lower(),
            name,
        )
        location: Token | None = \
            prev_sibling(scope.bodyStart, lambda t: t.str == name)
        if location is None:
            # fall back to the body opener when the name token is not found
            location = scope.bodyStart
        self.report_error(location, Messages.get(self._MSG, suggestion))
def visit_scope(self, scope: Scope) -> None:
    """Report type names spelling an abbreviation in full capitals."""
    name: str | None = scope.className
    if name is None:
        return
    if not self._ABBREVIATION_REGEX.findall(name):
        return
    # suggest the form with only the first letter capitalized
    suggestion: str = self._ABBREVIATION_REGEX.sub(
        lambda m: m.group(0)[0] + m.group(0)[1:].lower(),
        name,
    )
    location: Token | None = \
        prev_sibling(scope.bodyStart, lambda t: t.str == name)
    if location is None:
        # fall back to the body opener when the name token is not found
        location = scope.bodyStart
    self.report_error(location, Messages.get(self._MSG, suggestion))
class AssignmentWrapChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#assignment-wrap"""
    ID: str = 'assignment-wrap'
    _MSG: str = 'assignment.wrap'

    # tokens that legitimately open a multi-line right-hand side
    _START_TOKENS: frozenset[str] = frozenset(['{', '[', '(', 'lambda'])

    def process_tokens(self, tokens: list[Token]) -> None:
        """Flag assignments whose right-hand side spills past the '=' line."""
        assignments: list[Token] = \
            [t for t in tokens if t.str == '=' and t.isAssignmentOp]
        for assign in assignments:
            first_rhs: Token | None = assign.next
            if first_rhs is None or first_rhs.str in self._START_TOKENS:
                continue
            subtree: Token | None = assign.astOperand2
            if subtree is None:
                continue
            # descend to the right-most leaf of the RHS expression tree
            tail: Token = subtree
            while tail.astOperand2 is not None:
                tail = tail.astOperand2
            if tail.str == ';':
                tail = tail.previous

            # checks for violation: RHS starts on the '=' line yet ends below
            if first_rhs.linenr != assign.linenr or tail.linenr <= assign.linenr:
                continue
            self.report_error(assign, Messages.get(self._MSG))
def process_tokens(self, tokens: list[Token]) -> None:
    """Flag assignments whose right-hand side spills past the '=' line."""
    assignments: list[Token] = \
        [t for t in tokens if t.str == '=' and t.isAssignmentOp]
    for assign in assignments:
        first_rhs: Token | None = assign.next
        if first_rhs is None or first_rhs.str in self._START_TOKENS:
            continue
        subtree: Token | None = assign.astOperand2
        if subtree is None:
            continue
        # descend to the right-most leaf of the RHS expression tree
        tail: Token = subtree
        while tail.astOperand2 is not None:
            tail = tail.astOperand2
        if tail.str == ';':
            tail = tail.previous

        # checks for violation: RHS starts on the '=' line yet ends below
        if first_rhs.linenr != assign.linenr or tail.linenr <= assign.linenr:
            continue
        self.report_error(assign, Messages.get(self._MSG))
class BlockCommentSpacesChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#block-comment-spaces"""
    ID: str = 'block-comment-spaces'
    _MSG_SINGLE_START: str = 'block.comment.spaces.single.start'
    _MSG_SINGLE_END: str = 'block.comment.spaces.single.end'
    _MSG_MULTI: str = 'block.comment.spaces.multi'

    # '/*' glued to content, '*' glued to content, content glued to '*/'
    _BLOCK_COMMENT_START_REGEX: Pattern = re(r'^/\*+[^/*\s]')
    _BLOCK_COMMENT_CENTER_REGEX: Pattern = re(r'^\s*\*[^\s/]')
    _BLOCK_COMMENT_END_REGEX: Pattern = re(r'[^\s\*]\*/$')

    def check_file(self, token: Token, content: str) -> None:
        """Report block-comment delimiters not separated from their text."""
        for match in finditer(r'/\*.*?\*/', content, DOTALL):
            first_line: int = content.count('\n', 0, match.start()) + 1
            comment_lines: list[str] = match.group().splitlines()

            # opening delimiter of the comment
            if self._BLOCK_COMMENT_START_REGEX.search(comment_lines[0]):
                self.report_error(
                    token,
                    Messages.get(self._MSG_SINGLE_START),
                    first_line,
                )
            # closing delimiter of the comment
            if self._BLOCK_COMMENT_END_REGEX.search(comment_lines[-1]):
                self.report_error(
                    token,
                    Messages.get(self._MSG_SINGLE_END),
                    first_line + len(comment_lines) - 1,
                )
            # every continuation line in between
            for offset, line in enumerate(comment_lines[1:], 1):
                if self._BLOCK_COMMENT_CENTER_REGEX.search(line):
                    self.report_error(
                        token,
                        Messages.get(self._MSG_MULTI),
                        first_line + offset,
                    )
def check_file(self, token: Token, content: str) -> None:
    """Report block-comment delimiters not separated from their text."""
    for match in finditer(r'/\*.*?\*/', content, DOTALL):
        first_line: int = content.count('\n', 0, match.start()) + 1
        comment_lines: list[str] = match.group().splitlines()

        # opening delimiter of the comment
        if self._BLOCK_COMMENT_START_REGEX.search(comment_lines[0]):
            self.report_error(
                token,
                Messages.get(self._MSG_SINGLE_START),
                first_line,
            )
        # closing delimiter of the comment
        if self._BLOCK_COMMENT_END_REGEX.search(comment_lines[-1]):
            self.report_error(
                token,
                Messages.get(self._MSG_SINGLE_END),
                first_line + len(comment_lines) - 1,
            )
        # every continuation line in between
        for offset, line in enumerate(comment_lines[1:], 1):
            if self._BLOCK_COMMENT_CENTER_REGEX.search(line):
                self.report_error(
                    token,
                    Messages.get(self._MSG_MULTI),
                    first_line + offset,
                )
class BlockCommentTrimChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#block-comment-trim"""
    ID: str = 'block-comment-trim'
    _MSG_FIRST: str = 'block.comment.trim.first'
    _MSG_LAST: str = 'block.comment.trim.last'

    # a line is considered empty when it holds only a bare '*' or nothing
    _EMPTY_LINES: frozenset[str] = frozenset(['*', ''])

    def check_file(self, token: Token, content: str) -> None:
        """Report blank filler lines at either edge of a block comment."""
        for match in finditer(r'/\*(.*?)\*/', content, DOTALL):
            first_line: int = content.count('\n', 0, match.start()) + 1
            body_lines: list[str] = match.group(1).splitlines()

            # comments shorter than three lines have nothing to trim
            if len(body_lines) < 3:
                continue

            # indices of lines carrying actual text
            text_indices: list[int] = [
                idx for idx, body_line in enumerate(body_lines)
                if body_line.strip() not in self._EMPTY_LINES
            ]
            if not text_indices:
                continue

            # leading filler: opening line empty and second line a bare '*'
            if body_lines[0].strip() in self._EMPTY_LINES and \
                    body_lines[1].strip() == '*' and \
                    text_indices[0] > 1:
                self.report_error(
                    token,
                    Messages.get(self._MSG_FIRST),
                    first_line + 1,
                )

            # trailing filler: last line empty and second-to-last a bare '*'
            if body_lines[-1].strip() != '' or body_lines[-2].strip() != '*':
                continue
            if text_indices[-1] >= len(body_lines) - 2:
                continue
            self.report_error(
                token,
                Messages.get(self._MSG_LAST),
                first_line + len(body_lines) - 1,
            )
def check_file(self, token: Token, content: str) -> None:
    """Report blank filler lines at either edge of a block comment."""
    for match in finditer(r'/\*(.*?)\*/', content, DOTALL):
        first_line: int = content.count('\n', 0, match.start()) + 1
        body_lines: list[str] = match.group(1).splitlines()

        # comments shorter than three lines have nothing to trim
        if len(body_lines) < 3:
            continue

        # indices of lines carrying actual text
        text_indices: list[int] = [
            idx for idx, body_line in enumerate(body_lines)
            if body_line.strip() not in self._EMPTY_LINES
        ]
        if not text_indices:
            continue

        # leading filler: opening line empty and second line a bare '*'
        if body_lines[0].strip() in self._EMPTY_LINES and \
                body_lines[1].strip() == '*' and \
                text_indices[0] > 1:
            self.report_error(
                token,
                Messages.get(self._MSG_FIRST),
                first_line + 1,
            )

        # trailing filler: last line empty and second-to-last a bare '*'
        if body_lines[-1].strip() != '' or body_lines[-2].strip() != '*':
            continue
        if text_indices[-1] >= len(body_lines) - 2:
            continue
        self.report_error(
            token,
            Messages.get(self._MSG_LAST),
            first_line + len(body_lines) - 1,
        )
class BlockTagIndentationChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#block-tag-indentation"""
    ID: str = 'block-tag-indentation'
    _MSG: str = 'block.tag.indentation'

    # a '* @tag' line, and a continuation line's whitespace after '*'
    _BLOCK_TAG_REGEX: Pattern = re_compile(r'^\s*\*\s+@\w+')
    _CONTINUATION_LINE_REGEX: Pattern = re_compile(r'^\s*\*(\s+)')

    def check_file(self, token: Token, content: str) -> None:
        """Report block-tag continuation lines whose indent is not 5 spaces."""
        for match in finditer(r'/\*\*.*?\*/', content, DOTALL):
            first_line: int = content.count('\n', 0, match.start()) + 1
            inside_tag: bool = False
            for offset, line in enumerate(match.group().splitlines()):
                # a '@tag' line opens a block-tag section
                if self._BLOCK_TAG_REGEX.search(line):
                    inside_tag = True
                    continue
                if not inside_tag:
                    continue
                bare: str = line.strip()
                if bare in ('', '*', '*/'):
                    # a blank line or terminator closes the section
                    inside_tag = False
                    continue
                if bare.startswith('* @'):
                    continue

                # checks for violation
                indent_match: Match | None = \
                    self._CONTINUATION_LINE_REGEX.match(line)
                if indent_match is None:
                    continue
                if len(indent_match.group(1)) == 5:
                    continue
                self.report_error(token, Messages.get(self._MSG), first_line + offset)
def check_file(self, token: Token, content: str) -> None:
    """Report block-tag continuation lines whose indent is not 5 spaces."""
    for match in finditer(r'/\*\*.*?\*/', content, DOTALL):
        first_line: int = content.count('\n', 0, match.start()) + 1
        inside_tag: bool = False
        for offset, line in enumerate(match.group().splitlines()):
            # a '@tag' line opens a block-tag section
            if self._BLOCK_TAG_REGEX.search(line):
                inside_tag = True
                continue
            if not inside_tag:
                continue
            bare: str = line.strip()
            if bare in ('', '*', '*/'):
                # a blank line or terminator closes the section
                inside_tag = False
                continue
            if bare.startswith('* @'):
                continue

            # checks for violation
            indent_match: Match | None = \
                self._CONTINUATION_LINE_REGEX.match(line)
            if indent_match is None:
                continue
            if len(indent_match.group(1)) == 5:
                continue
            self.report_error(token, Messages.get(self._MSG), first_line + offset)
class BlockTagPunctuationChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#block-tag-punctuation"""
    ID: str = 'block-tag-punctuation'
    _MSG: str = 'block.tag.punctuation'
    ARGS: list[str] = [PUNCTUATE_BLOCK_TAGS_OPTION]

    _PUNCTUATIONS: frozenset[str] = frozenset(['.', '!', '?', ')'])

    def __init__(self) -> None:
        super().__init__()
        # default tags to enforce, overridable through before_run()
        self._block_tags: set[str] = {
            '@param',
            '@return',
            '@returns',
        }

    def before_run(self, args: dict[str, str]) -> None:
        # option value is a comma-separated tag list
        self._block_tags = set(args[PUNCTUATE_BLOCK_TAGS_OPTION].split(','))

    def check_file(self, token: Token, content: str) -> None:
        """Report enforced block tags whose description lacks end punctuation."""
        for match in finditer(r'/\*(.*?)\*/', content, DOTALL):
            comment_body: str = match.group(1)
            # BUG FIX: every sibling file checker computes the comment's first
            # line as newline-count + 1; the '+ 1' was missing here, so every
            # report pointed one line above the offending description line.
            start_line: int = content.count('\n', 0, match.start()) + 1
            lines: list[str] = comment_body.splitlines()

            # strategy to capture continuation indent
            current_tag: str | None = None
            last_text: str = ''
            last_text_line_idx: int = -1

            for i, line in enumerate(lines):
                stripped: str = line.strip().lstrip('*').strip()
                if not stripped:
                    continue

                # only enforce certain tags
                found_tag: str | None = \
                    next((t for t in self._block_tags if stripped.startswith(t)), None)

                # long descriptions have multiple lines, take only the last one
                if found_tag:
                    # a new tag ends the previous one: report it if unpunctuated
                    if current_tag is not None and \
                            last_text and \
                            last_text[-1] not in self._PUNCTUATIONS:
                        self.report_error(
                            token,
                            Messages.get(self._MSG, current_tag),
                            start_line + last_text_line_idx,
                        )

                    current_tag = found_tag
                    remainder: str = stripped[len(found_tag):].strip()
                    if current_tag == '@param':
                        # '@param name description...' -> keep only description
                        parts: list[str] = remainder.split(maxsplit=1)
                        last_text = parts[1] if len(parts) > 1 else ''
                    else:
                        last_text = remainder
                    last_text_line_idx = i if last_text else -1
                elif current_tag:
                    last_text = stripped
                    last_text_line_idx = i

            # checks for violation on the final tag of the comment
            if current_tag is None or \
                    not last_text or \
                    last_text[-1] in self._PUNCTUATIONS:
                continue
            self.report_error(
                token,
                Messages.get(self._MSG, current_tag),
                start_line + last_text_line_idx,
            )
def check_file(self, token: Token, content: str) -> None:
    """Report enforced block tags whose description lacks end punctuation."""
    for match in finditer(r'/\*(.*?)\*/', content, DOTALL):
        comment_body: str = match.group(1)
        # BUG FIX: every sibling file checker computes the comment's first
        # line as newline-count + 1; the '+ 1' was missing here, so every
        # report pointed one line above the offending description line.
        start_line: int = content.count('\n', 0, match.start()) + 1
        lines: list[str] = comment_body.splitlines()

        # strategy to capture continuation indent
        current_tag: str | None = None
        last_text: str = ''
        last_text_line_idx: int = -1

        for i, line in enumerate(lines):
            stripped: str = line.strip().lstrip('*').strip()
            if not stripped:
                continue

            # only enforce certain tags
            found_tag: str | None = \
                next((t for t in self._block_tags if stripped.startswith(t)), None)

            # long descriptions have multiple lines, take only the last one
            if found_tag:
                # a new tag ends the previous one: report it if unpunctuated
                if current_tag is not None and \
                        last_text and \
                        last_text[-1] not in self._PUNCTUATIONS:
                    self.report_error(
                        token,
                        Messages.get(self._MSG, current_tag),
                        start_line + last_text_line_idx,
                    )

                current_tag = found_tag
                remainder: str = stripped[len(found_tag):].strip()
                if current_tag == '@param':
                    # '@param name description...' -> keep only description
                    parts: list[str] = remainder.split(maxsplit=1)
                    last_text = parts[1] if len(parts) > 1 else ''
                else:
                    last_text = remainder
                last_text_line_idx = i if last_text else -1
            elif current_tag:
                last_text = stripped
                last_text_line_idx = i

        # checks for violation on the final tag of the comment
        if current_tag is None or \
                not last_text or \
                last_text[-1] in self._PUNCTUATIONS:
            continue
        self.report_error(
            token,
            Messages.get(self._MSG, current_tag),
            start_line + last_text_line_idx,
        )
class CaseSeparatorChecker(RulebookChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#case-separator"""
    ID: str = 'case-separator'
    _MSG_MISSING: str = 'case.separator.missing'
    _MSG_UNEXPECTED: str = 'case.separator.unexpected'

    _BRANCH_TOKENS: frozenset[str] = frozenset(['case', 'default'])

    def __init__(self) -> None:
        super().__init__()
        # (file, line, column, message) keys already reported, for dedup
        self._reported_errors: set[tuple[str, int, int, str]] = set()

    def get_scopeset(self) -> set[str]:
        return {'Switch'}

    def visit_scope(self, scope: Scope) -> None:
        """Check blank-line separation between consecutive switch cases."""
        # collect cases
        body_start: Token | None = scope.bodyStart
        body_end: Token | None = scope.bodyEnd
        if body_start is None or \
                body_end is None:
            return

        groups: list[list[Token]] = []
        curr_token: Token | None = body_start.next

        # BUG FIX: the former `if not isinstance(curr_token, Token): continue`
        # (and the matching guard in the inner scan loop) continued WITHOUT
        # advancing the cursor, so any non-Token element would hang the
        # checker forever.  The token chain produced by cppcheckdata holds
        # only Token objects, so the dead guards are removed rather than
        # patched.
        while curr_token is not None and \
                curr_token is not body_end:
            if curr_token.str in self._BRANCH_TOKENS:
                case_keyword: Token = curr_token
                # for 'case X:' the label token follows the keyword
                case_label: Token = \
                    curr_token.next if curr_token.str == 'case' else curr_token

                end_token: Token = case_keyword
                scan: Token | None = curr_token.next
                colon_found: bool = False
                has_body: bool = False

                # scan ahead to the next branch keyword of this same switch
                while scan is not None and \
                        scan is not body_end:
                    if scan.str in self._BRANCH_TOKENS and \
                            scan.scope is scope:
                        break
                    if scan.str == ':':
                        colon_found = True
                    elif colon_found:
                        has_body = True
                        end_token = scan
                    scan = scan.next

                # empty fall-through case: anchor its end on the next keyword
                if not has_body and \
                        scan is not None and \
                        scan is not body_end and \
                        scan.str in self._BRANCH_TOKENS:
                    end_token = scan

                groups.append([case_label, case_keyword, end_token])
                curr_token = scan
                continue
            curr_token = curr_token.next

        # checks for violation
        if not groups:
            return

        for i in range(1, len(groups)):
            prev_group: list[Token] = groups[i - 1]
            curr_group: list[Token] = groups[i]

            prev_is_multiline: bool = prev_group[2].linenr - prev_group[1].linenr > 0
            prev_has_body: bool = prev_group[2].str not in self._BRANCH_TOKENS

            if not prev_has_body:
                curr_is_single_line: bool = curr_group[2].linenr == curr_group[1].linenr
                if not curr_is_single_line:
                    continue

            prev_end_line: int = prev_group[2].linenr
            curr_start_line: int = curr_group[1].linenr
            line_diff: int = curr_start_line - prev_end_line

            # multi-line cases need one blank separator line between them;
            # single-line cases must be directly adjacent
            if prev_is_multiline:
                if line_diff != 2:
                    self._report_error_once(prev_group[0], Messages.get(self._MSG_MISSING))
            elif line_diff != 1:
                self._report_error_once(prev_group[0], Messages.get(self._MSG_UNEXPECTED))

    def _report_error_once(self, token: Token, message: str) -> None:
        """Forward to report_error, suppressing duplicates at the same spot."""
        error_key: tuple[str, int, int, str] = \
            (token.file, token.linenr, token.column, message)
        if error_key in self._reported_errors:
            return
        self._reported_errors.add(error_key)
        self.report_error(token, message)
def visit_scope(self, scope: Scope) -> None:
    """Check blank-line separation between consecutive switch cases."""
    # collect cases
    body_start: Token | None = scope.bodyStart
    body_end: Token | None = scope.bodyEnd
    if body_start is None or \
            body_end is None:
        return

    groups: list[list[Token]] = []
    curr_token: Token | None = body_start.next

    # BUG FIX: the former `if not isinstance(curr_token, Token): continue`
    # (and the matching guard in the inner scan loop) continued WITHOUT
    # advancing the cursor, so any non-Token element would hang the checker
    # forever.  The token chain produced by cppcheckdata holds only Token
    # objects, so the dead guards are removed rather than patched.
    while curr_token is not None and \
            curr_token is not body_end:
        if curr_token.str in self._BRANCH_TOKENS:
            case_keyword: Token = curr_token
            # for 'case X:' the label token follows the keyword
            case_label: Token = \
                curr_token.next if curr_token.str == 'case' else curr_token

            end_token: Token = case_keyword
            scan: Token | None = curr_token.next
            colon_found: bool = False
            has_body: bool = False

            # scan ahead to the next branch keyword of this same switch
            while scan is not None and \
                    scan is not body_end:
                if scan.str in self._BRANCH_TOKENS and \
                        scan.scope is scope:
                    break
                if scan.str == ':':
                    colon_found = True
                elif colon_found:
                    has_body = True
                    end_token = scan
                scan = scan.next

            # empty fall-through case: anchor its end on the next keyword
            if not has_body and \
                    scan is not None and \
                    scan is not body_end and \
                    scan.str in self._BRANCH_TOKENS:
                end_token = scan

            groups.append([case_label, case_keyword, end_token])
            curr_token = scan
            continue
        curr_token = curr_token.next

    # checks for violation
    if not groups:
        return

    for i in range(1, len(groups)):
        prev_group: list[Token] = groups[i - 1]
        curr_group: list[Token] = groups[i]

        prev_is_multiline: bool = prev_group[2].linenr - prev_group[1].linenr > 0
        prev_has_body: bool = prev_group[2].str not in self._BRANCH_TOKENS

        if not prev_has_body:
            curr_is_single_line: bool = curr_group[2].linenr == curr_group[1].linenr
            if not curr_is_single_line:
                continue

        prev_end_line: int = prev_group[2].linenr
        curr_start_line: int = curr_group[1].linenr
        line_diff: int = curr_start_line - prev_end_line

        # multi-line cases need one blank separator line between them;
        # single-line cases must be directly adjacent
        if prev_is_multiline:
            if line_diff != 2:
                self._report_error_once(prev_group[0], Messages.get(self._MSG_MISSING))
        elif line_diff != 1:
            self._report_error_once(prev_group[0], Messages.get(self._MSG_UNEXPECTED))
class ChainCallWrapChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#chain-call-wrap"""
    ID: str = 'chain-call-wrap'
    _MSG_MISSING: str = 'chain.call.wrap.missing'
    _MSG_UNEXPECTED: str = 'chain.call.wrap.unexpected'

    _SHOULD_BREAK: frozenset[str] = frozenset(['+', ';', '{', '}', '=', '?', ':'])
    _CLOSING_PARENTHESES: frozenset[str] = frozenset([')', '}'])

    def __init__(self) -> None:
        super().__init__()
        # (file, line, column, message) keys already reported, for dedup
        self._reported_errors: set[tuple[str, int, int, str]] = set()

    def process_tokens(self, tokens: list[Token]) -> None:
        """Check that each dot of a multi-line call chain opens its own line."""
        # start from the first dot of each chain
        chain_heads: list[Token] = [
            t for t in tokens
            if t.str == '.' and (not t.previous or t.previous.str != '.')
        ]
        for head in chain_heads:
            # skip dots that live inside a call's argument list
            cursor: Token | None = head.previous
            depth: int = 0
            while cursor is not None:
                if cursor.str == ')':
                    depth += 1
                elif cursor.str == '(':
                    if depth == 0:
                        break
                    depth -= 1
                elif cursor.str in self._SHOULD_BREAK:
                    cursor = None
                    break
                cursor = cursor.previous
            if cursor is not None and \
                    cursor.str == '(':
                continue

            # skip dots that belong to a ternary expression
            cursor = head.previous
            depth = 0
            in_ternary: bool = False
            while cursor is not None:
                if cursor.str == ')':
                    depth += 1
                elif cursor.str == '(':
                    depth -= 1
                elif depth == 0:
                    if cursor.str == '?':
                        in_ternary = True
                        break
                    if cursor.str in self._SHOULD_BREAK:
                        break
                cursor = cursor.previous
            if in_ternary:
                continue

            # gather every same-level dot of this chain
            dots: list[Token] = []
            walker: Token | None = head
            nesting: int = 0
            while walker is not None:
                if walker.str == '(':
                    nesting += 1
                elif walker.str == ')':
                    nesting -= 1
                    if nesting < 0:
                        break
                if nesting == 0:
                    if walker.str == '.':
                        dots.append(walker)
                    elif walker.str in self._SHOULD_BREAK:
                        break
                walker = walker.next

            # single-line chains and lone dots are fine
            if len(dots) < 2:
                continue
            if all(d.linenr == dots[0].linenr for d in dots):
                continue

            # checks for violation
            for dot in dots:
                before: Token | None = dot.previous
                if before is None:
                    continue
                if before.str in self._CLOSING_PARENTHESES and \
                        before.previous and \
                        before.linenr > before.previous.linenr:
                    if dot.linenr > before.linenr:
                        self._report_error_once(dot, Messages.get(self._MSG_UNEXPECTED))
                    continue
                if dot.linenr == before.linenr:
                    self._report_error_once(dot, Messages.get(self._MSG_MISSING))

    def _report_error_once(self, token: Token, message: str) -> None:
        """Forward to report_error, suppressing duplicates at the same spot."""
        error_key: tuple[str, int, int, str] = \
            (token.file, token.linenr, token.column, message)
        if error_key in self._reported_errors:
            return
        self._reported_errors.add(error_key)
        self.report_error(token, message)
def process_tokens(self, tokens: list[Token]) -> None:
    """Check that each dot of a multi-line call chain opens its own line."""
    # start from the first dot of each chain
    chain_heads: list[Token] = [
        t for t in tokens
        if t.str == '.' and (not t.previous or t.previous.str != '.')
    ]
    for head in chain_heads:
        # skip dots that live inside a call's argument list
        cursor: Token | None = head.previous
        depth: int = 0
        while cursor is not None:
            if cursor.str == ')':
                depth += 1
            elif cursor.str == '(':
                if depth == 0:
                    break
                depth -= 1
            elif cursor.str in self._SHOULD_BREAK:
                cursor = None
                break
            cursor = cursor.previous
        if cursor is not None and \
                cursor.str == '(':
            continue

        # skip dots that belong to a ternary expression
        cursor = head.previous
        depth = 0
        in_ternary: bool = False
        while cursor is not None:
            if cursor.str == ')':
                depth += 1
            elif cursor.str == '(':
                depth -= 1
            elif depth == 0:
                if cursor.str == '?':
                    in_ternary = True
                    break
                if cursor.str in self._SHOULD_BREAK:
                    break
            cursor = cursor.previous
        if in_ternary:
            continue

        # gather every same-level dot of this chain
        dots: list[Token] = []
        walker: Token | None = head
        nesting: int = 0
        while walker is not None:
            if walker.str == '(':
                nesting += 1
            elif walker.str == ')':
                nesting -= 1
                if nesting < 0:
                    break
            if nesting == 0:
                if walker.str == '.':
                    dots.append(walker)
                elif walker.str in self._SHOULD_BREAK:
                    break
            walker = walker.next

        # single-line chains and lone dots are fine
        if len(dots) < 2:
            continue
        if all(d.linenr == dots[0].linenr for d in dots):
            continue

        # checks for violation
        for dot in dots:
            before: Token | None = dot.previous
            if before is None:
                continue
            if before.str in self._CLOSING_PARENTHESES and \
                    before.previous and \
                    before.linenr > before.previous.linenr:
                if dot.linenr > before.linenr:
                    self._report_error_once(dot, Messages.get(self._MSG_UNEXPECTED))
                continue
            if dot.linenr == before.linenr:
                self._report_error_once(dot, Messages.get(self._MSG_MISSING))
class ClassNameChecker(RulebookChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#class-name"""
    ID: str = 'class-name'
    _MSG: str = 'class.name'

    def get_scopeset(self) -> set[str]:
        return {'Class', 'Struct', 'Union', 'Enum'}

    def visit_scope(self, scope: Scope) -> None:
        """Report type names that are not written in PascalCase."""
        name: str | None = scope.className
        if name is None:
            return
        # already acceptable: leading capital and no underscores
        if name[0].isupper() and '_' not in name:
            return
        # build the suggested name, capitalizing each underscore-split part
        suggestion: str = ''.join(
            part[0].upper() + part[1:] if part else ''
            for part in name.split('_')
        )
        location: Token | None = \
            prev_sibling(scope.bodyStart, lambda t: t.str == name)
        self.report_error(
            location if location is not None else scope.bodyStart,
            Messages.get(self._MSG, suggestion),
        )
def visit_scope(self, scope: Scope) -> None:
    """Report type names that are not written in PascalCase."""
    name: str | None = scope.className
    if name is None:
        return
    # already acceptable: leading capital and no underscores
    if name[0].isupper() and '_' not in name:
        return
    # build the suggested name, capitalizing each underscore-split part
    suggestion: str = ''.join(
        part[0].upper() + part[1:] if part else ''
        for part in name.split('_')
    )
    location: Token | None = \
        prev_sibling(scope.bodyStart, lambda t: t.str == name)
    self.report_error(
        location if location is not None else scope.bodyStart,
        Messages.get(self._MSG, suggestion),
    )
class CommentSpacesChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#comment-spaces"""
    ID: str = 'comment-spaces'
    _MSG: str = 'comment.spaces'

    def check_file(self, token: Token, content: str) -> None:
        """Report '//' comments whose text does not start with a space."""
        # checks for violation
        for lineno, line in enumerate(content.splitlines(), 1):
            line_stripped: str = line.lstrip()
            # block-comment continuation lines are not line comments
            if line_stripped.startswith('*'):
                continue
            if '//' not in line_stripped:
                continue
            comment_pos: int = line.find('//')
            before_comment: str = line[:comment_pos]
            # count unescaped quotes before the '//' so markers that sit
            # inside an open string/char literal are not treated as comments
            double_quote_count: int = 0
            single_quote_count: int = 0
            i: int = 0
            while i < len(before_comment):
                if before_comment[i] == '\\':
                    i += 2  # skip the escaped character
                    continue
                if before_comment[i] == '"':
                    double_quote_count += 1
                elif before_comment[i] == "'":
                    single_quote_count += 1
                i += 1
            # an odd count means the '//' is inside an open literal
            if double_quote_count % 2 == 1 or \
                    single_quote_count % 2 == 1:
                continue
            line_stripped = line_stripped.split('//')[1]
            # OK when the comment text starts with a space or is only slashes
            if line_stripped.startswith(' ') or \
                    line_stripped.replace('/', '').strip() == '':
                continue
            self.report_error(token, Messages.get(self._MSG), lineno, line.find('//') + 1)
def check_file(self, token: Token, content: str) -> None:
    """Report '//' comments whose text does not start with a space."""
    # checks for violation
    for lineno, line in enumerate(content.splitlines(), 1):
        line_stripped: str = line.lstrip()
        # skip block-comment continuation lines
        if line_stripped.startswith('*'):
            continue
        if '//' not in line_stripped:
            continue
        comment_pos: int = line.find('//')
        before_comment: str = line[:comment_pos]
        # quote-parity scan: a '//' inside an open string literal is not a comment
        double_quote_count: int = 0
        single_quote_count: int = 0
        i: int = 0
        while i < len(before_comment):
            if before_comment[i] == '\\':
                i += 2  # skip escaped character
                continue
            if before_comment[i] == '"':
                double_quote_count += 1
            elif before_comment[i] == "'":
                single_quote_count += 1
            i += 1
        if double_quote_count % 2 == 1 or \
                single_quote_count % 2 == 1:
            continue
        line_stripped = line_stripped.split('//')[1]
        # accept a leading space, or slash-only separators like '////'
        if line_stripped.startswith(' ') or \
                line_stripped.replace('/', '').strip() == '':
            continue
        self.report_error(token, Messages.get(self._MSG), lineno, line.find('//') + 1)
class CommentTrimChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#comment-trim"""
    ID: str = 'comment-trim'
    _MSG: str = 'comment.trim'

    _EOL_COMMENT_REGEX: Pattern = re(r'^\s*//\s*$')
    _ANY_COMMENT_REGEX: Pattern = re(r'^\s*//')

    def check_file(self, token: Token, content: str) -> None:
        """Report empty '//' lines opening or closing a multi-line comment run."""
        source_lines: list[str] = content.splitlines()
        total: int = len(source_lines)
        index: int = 0
        while index < total:
            if not self._ANY_COMMENT_REGEX.match(source_lines[index]):
                index += 1
                continue
            # extend to the last consecutive '//' line of this run
            last: int = index
            while last + 1 < total and \
                    self._ANY_COMMENT_REGEX.match(source_lines[last + 1]):
                last += 1
            if last > index:
                # blank comment at the head of the run
                if self._EOL_COMMENT_REGEX.match(source_lines[index]):
                    self.report_error(token, Messages.get(self._MSG), index + 1)
                # blank comment at the tail of the run
                if self._EOL_COMMENT_REGEX.match(source_lines[last]):
                    self.report_error(token, Messages.get(self._MSG), last + 1)
            index = last + 1
def check_file(self, token: Token, content: str) -> None:
    """Report empty '//' lines at the head or tail of a comment run."""
    lines: list[str] = content.splitlines()
    i: int = 0
    while i < len(lines):
        # find start of a comment block
        if self._ANY_COMMENT_REGEX.match(lines[i]):
            start_block: int = i
            # find end of the block
            j: int = i
            while j + 1 < len(lines) and \
                    self._ANY_COMMENT_REGEX.match(lines[j + 1]):
                j += 1

            # check first line of block (only multi-line runs are considered)
            if start_block != j and \
                    self._EOL_COMMENT_REGEX.match(lines[start_block]):
                self.report_error(token, Messages.get(self._MSG), start_block + 1)

            # check last line of block
            if start_block != j and \
                    self._EOL_COMMENT_REGEX.match(lines[j]):
                self.report_error(token, Messages.get(self._MSG), j + 1)

            # skip to end of block
            i = j
        i += 1
class ComplicatedAssignmentChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#complicated-assignment"""
    ID: str = 'complicated-assignment'
    _MSG: str = 'complicated.assignment'

    _SHORTHAND_OPERATIONS: frozenset[str] = frozenset(['+', '-', '*', '/', '%'])

    def process_tokens(self, tokens: list[Token]) -> None:
        """Report 'x = x <op> ...' assignments that could use the compound '<op>='."""
        for token in tokens:
            # skip shorthand operator
            if token.str != '=':
                continue

            # checks for violation
            lhs: Token | None = token.previous
            if lhs is None or not lhs.isName:
                continue

            def _is_self_reference(t: Token, lhs: Token = lhs) -> bool:
                # stop at ';', or match the lhs reused right before an
                # arithmetic operator on the right-hand side
                if t.str == ';':
                    return True
                return bool(
                    t.isName and
                    t.str == lhs.str and
                    t.next is not None and
                    t.next.str in self._SHORTHAND_OPERATIONS
                )

            match = next_sibling(token.next, _is_self_reference)
            if match is None or match.str == ';':
                continue
            # suggest the compound operator, e.g. '+='
            self.report_error(token, Messages.get(self._MSG, f'{match.next.str}='))
def process_tokens(self, tokens: list[Token]) -> None:
    """Report 'x = x <op> ...' assignments that could use '<op>='."""
    for token in tokens:
        # skip shorthand operator
        if token.str != '=':
            continue

        # checks for violation
        lhs: Token | None = token.previous
        if lhs is None or not lhs.isName:
            continue
        # search within this statement for the lhs reused right before an
        # arithmetic operator; a ';' ends the search
        match = \
            next_sibling(
                token.next,
                lambda t: \
                    t.str == ';' or \
                    t.isName and \
                    isinstance(lhs, Token) and \
                    t.str == lhs.str and \
                    t.next is not None and \
                    t.next.str in self._SHORTHAND_OPERATIONS,
            )
        if match is None or match.str == ';':
            continue
        # suggest the compound operator, e.g. '+='
        self.report_error(token, Messages.get(self._MSG, f'{match.next.str}='))
class DuplicateBlankLineChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#duplicate-blank-line"""
    ID: str = 'duplicate-blank-line'
    _MSG: str = 'duplicate.blank.line'

    def check_file(self, token: Token, content: str) -> None:
        """Report the second of any two consecutive blank lines."""
        source_lines: list[str] = content.splitlines()
        for index in range(1, len(source_lines)):
            previous_blank: bool = not source_lines[index - 1].strip()
            current_blank: bool = not source_lines[index].strip()
            # checks for violation: two blank lines in a row
            if current_blank and previous_blank:
                self.report_error(token, Messages.get(self._MSG), index + 1)
class DuplicateBlankLineInBlockCommentChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#duplicate-blank-line-in-block-comment"""
    ID: str = 'duplicate-blank-line-in-block-comment'
    _MSG: str = 'duplicate.blank.line.in.block.comment'

    def check_file(self, token: Token, content: str) -> None:
        """Report consecutive bare '*' lines inside '/* ... */' comments."""
        for match in finditer(r'/\*(.*?)\*/', content, DOTALL):
            body_lines: list[str] = match.group(1).splitlines()
            # file line number of the '/*' opener
            first_line: int = content.count('\n', 0, match.start()) + 1
            for index in range(len(body_lines) - 1):
                if body_lines[index].strip() != '*':
                    continue
                if body_lines[index + 1].strip() != '*':
                    continue
                # runs touching the first or last body line are exempt
                if index == 0 or index + 1 == len(body_lines) - 1:
                    continue
                self.report_error(token, Messages.get(self._MSG), first_line + index + 1)
def check_file(self, token: Token, content: str) -> None:
    """Report consecutive bare '*' lines inside block comments."""
    for match in finditer(r'/\*(.*?)\*/', content, DOTALL):
        # checks for violation
        comment_body: str = match.group(1)
        # file line number of the '/*' opener
        start_line: int = content.count('\n', 0, match.start()) + 1
        lines: list[str] = comment_body.splitlines()
        for i in range(len(lines) - 1):
            curr_token: str = lines[i].strip()
            next_token: str = lines[i + 1].strip()
            if curr_token != '*' or \
                    next_token != '*':
                continue
            # first and last body lines are exempt
            if i == 0 or \
                    i + 1 == len(lines) - 1:
                continue
            self.report_error(token, Messages.get(self._MSG), start_line + i + 1)
class DuplicateBlankLineInCommentChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#duplicate-blank-line-in-comment"""
    ID: str = 'duplicate-blank-line-in-comment'
    _MSG: str = 'duplicate.blank.line.in.comment'

    _EMPTY_COMMENT_REGEX: Pattern = re_compile(r'^\s*//\s*$')

    def check_file(self, token: Token, content: str) -> None:
        """Report the second of two consecutive empty '//' comment lines."""
        source_lines: list[str] = content.splitlines()
        for index, line in enumerate(source_lines[:-1]):
            # checks for violation: this line and the next are both bare '//'
            if self._EMPTY_COMMENT_REGEX.match(line) and \
                    self._EMPTY_COMMENT_REGEX.match(source_lines[index + 1]):
                self.report_error(token, Messages.get(self._MSG), index + 2)
def check_file(self, token: Token, content: str) -> None:
    """Report the second of two consecutive empty '//' lines."""
    lines: list[str] = content.splitlines()
    for i in range(len(lines) - 1):
        # checks for violation: both this line and the next are bare '//'
        if not self._EMPTY_COMMENT_REGEX.match(lines[i]) or \
                not self._EMPTY_COMMENT_REGEX.match(lines[i + 1]):
            continue
        self.report_error(token, Messages.get(self._MSG), i + 2)
class DuplicateSpaceChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#duplicate-space"""
    ID: str = 'duplicate-space'
    _MSG: str = 'duplicate.space'

    _DUPLICATE_REGEX: Pattern = re(r'(?<=\S)(?<!\*)[ \t]{2,}')
    _STRING_REGEX: Pattern = re(
        r'R"([^(]*)\(.*?\)\1"' +
        r'|"(?:[^"\\]|\\.)*"',
    )

    def check_file(self, token: Token, content: str) -> None:
        """Report the first run of 2+ spaces/tabs on each line, strings masked."""
        for lineno, raw_line in enumerate(content.splitlines(), start=1):
            masked: str = self._mask_strings(raw_line)
            hit: Match[str] | None = self._DUPLICATE_REGEX.search(masked)
            if hit is None:
                continue
            self.report_error(
                token,
                Messages.get(self._MSG),
                lineno,
                hit.start() + 1,
            )

    @staticmethod
    def _mask_strings(line: str) -> str:
        # replace every string literal with underscores of equal length so
        # spaces inside strings are never flagged
        return DuplicateSpaceChecker._STRING_REGEX.sub(lambda m: '_' * len(m.group()), line)
def check_file(self, token: Token, content: str) -> None:
    """Report runs of two or more spaces/tabs outside string literals."""
    for line_number, line in enumerate(content.splitlines(), start=1):
        # checks for violation (string literals are masked out first)
        match: Match[str] | None = self._DUPLICATE_REGEX.search(self._mask_strings(line))
        if match is None:
            continue
        self.report_error(
            token,
            Messages.get(self._MSG),
            line_number,
            match.start() + 1,  # 1-based column
        )
class FileNameChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#file-name"""
    ID: str = 'file-name'
    _MSG: str = 'file.name'

    _SNAKE_CASE_REGEX: Pattern = re(r'^[a-z0-9_]+\.(c|cpp|h|hpp)$')

    def check_file(self, token: Token, content: str) -> None:
        """Report file names that are not lower snake_case."""
        basename: str = token.file.split('/')[-1]
        if '_' in basename:  # remove prefix 'XXXXX_file.c' likely from dump
            basename = basename.split('_')[-1]
        # checks for violation
        if self._SNAKE_CASE_REGEX.match(basename):
            return
        suggestion: str = basename.lower().replace('-', '_')
        self.report_error(token, Messages.get(self._MSG, suggestion))
def check_file(self, token: Token, content: str) -> None:
    """Report file names that are not lower snake_case."""
    # checks for violation
    filename: str = token.file.split('/')[-1]
    if '_' in filename:  # remove prefix 'XXXXX_file.c' likely from dump
        filename = filename.split('_')[-1]
    if self._SNAKE_CASE_REGEX.match(filename):
        return
    self.report_error(
        token,
        # suggested fix: lowercase with dashes converted to underscores
        Messages.get(self._MSG, filename.lower().replace('-', '_')),
    )
class FileSizeChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#file-size"""
    ID: str = 'file-size'
    _MSG: str = 'file.size'
    ARGS: list[str] = [MAX_FILE_SIZE_OPTION]

    def __init__(self) -> None:
        super().__init__()
        # default limit, overridden by before_run
        self._max_file_size: int = 1000

    def before_run(self, args: dict[str, str]) -> None:
        self._max_file_size = int(args[MAX_FILE_SIZE_OPTION])

    def check_file(self, token: Token, content: str) -> None:
        """Report files longer than the configured maximum line count."""
        line_count: int = len(content.splitlines())
        # checks for violation
        if line_count > self._max_file_size:
            self.report_error(token, Messages.get(self._MSG, self._max_file_size))
FinalNewlineChecker
class GenericNameChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#generic-name"""
    ID: str = 'generic-name'
    _MSG: str = 'generic.name'

    _TARGET_TOKENS: frozenset[str] = frozenset(['typename', 'class'])

    def process_tokens(self, tokens: list[Token]) -> None:
        """Report single template type parameters not named as one capital letter."""
        for token in [t for t in tokens if t.str == 'template']:
            # only target template declaration
            open_bracket: Token | None = next_sibling(token, lambda t: t.str == '<')
            if open_bracket is None or \
                    not open_bracket.link:
                continue
            closing: Token | None = open_bracket.link
            params: list[Token] = []
            curr_token: Token | None = open_bracket.next
            multiple_params: bool = False
            while curr_token is not None and \
                    curr_token is not closing:
                # BUGFIX: the original 'continue' on a non-Token value never
                # advanced curr_token and could loop forever; bail out instead
                if not isinstance(curr_token, Token):
                    break
                if curr_token.str == ',':
                    multiple_params = True
                    break
                params.append(curr_token)
                curr_token = curr_token.next
            # only single-parameter templates of shape '<keyword name>'
            if multiple_params or \
                    len(params) != 2:
                continue

            # checks for violation
            keyword_token: Token = params[0]
            name_token: Token = params[1]
            if keyword_token.str not in self._TARGET_TOKENS:
                continue
            name: str = name_token.str
            if name_token.type != 'name' or \
                    name == '...':
                continue
            # one uppercase letter is the accepted convention
            if len(name) == 1 and \
                    name[0].isupper():
                continue
            self.report_error(name_token, Messages.get(self._MSG))
def process_tokens(self, tokens: list[Token]) -> None:
    """Report single template type parameters not named as one capital letter."""
    for token in [t for t in tokens if t.str == 'template']:
        # only target template declaration
        open_bracket: Token | None = next_sibling(token, lambda t: t.str == '<')
        if open_bracket is None or \
                not open_bracket.link:
            continue
        closing: Token | None = open_bracket.link
        params: list[Token] = []
        curr_token: Token | None = open_bracket.next
        continue_outer: bool = False
        while curr_token is not None and \
                curr_token is not closing:
            # NOTE(review): this 'continue' does not advance curr_token and
            # would spin forever if ever taken — confirm and fix upstream
            if not isinstance(curr_token, Token):
                continue
            if curr_token.str == ',':
                continue_outer = True
                break
            params.append(curr_token)
            curr_token = curr_token.next
        # only single-parameter templates of shape '<keyword name>'
        if continue_outer or \
                len(params) != 2:
            continue

        # checks for violation
        keyword_token: Token = params[0]
        name_token: Token = params[1]
        if keyword_token.str not in self._TARGET_TOKENS:
            continue
        name: str = name_token.str
        if name_token.type != 'name' or \
                name == '...':
            continue
        # one uppercase letter is the accepted convention
        if len(name) == 1 and \
                name[0].isupper():
            continue
        self.report_error(name_token, Messages.get(self._MSG))
class IdentifierNameChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#identifier-name"""
    MSG: str = 'identifier.name'
    ID: str = 'identifier-name'

    _SNAKE_CASE_REGEX: Pattern = re(r'(?<!^)(?=[A-Z][a-z])|(?<=[a-z])(?=[A-Z])')

    def process_tokens(self, tokens: list[Token]) -> None:
        """Check variable and function definition names for snake_case."""
        for token in tokens:
            # variable definition name
            if token.variable and token is token.variable.nameToken:
                self._process(token)
            # function definition name (constructors share the class name)
            if not token.function:
                continue
            if token is token.function.tokenDef and \
                    token.str != token.scope.className:
                self._process(token)

    def _process(self, token: Token) -> None:
        """Report a mixed-case name, suggesting its snake_case form."""
        name: str = token.str
        # ALL_CAPS constants are fine
        if all(c.isupper() or c == '_' for c in name):
            return
        # names without any uppercase are already snake_case
        if not any(c.isupper() for c in name):
            return
        suggestion: str = sub(r'_+', '_', self._SNAKE_CASE_REGEX.sub('_', name).lower())
        self.report_error(token, Messages.get(self.MSG, suggestion))
def process_tokens(self, tokens: list[Token]) -> None:
    """Dispatch variable and function definition names to _process."""
    for token in tokens:
        # variable definition name
        if token.variable and \
                token is token.variable.nameToken:
            self._process(token)
        # function definition name (skip constructors, which share the class name)
        if token.function and \
                token is token.function.tokenDef and \
                token.str != token.scope.className:
            self._process(token)
class IllegalCatchChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#illegal-catch"""
    ID: str = 'illegal-catch'
    _MSG: str = 'illegal.catch'

    _ILLEGAL_EXCEPTIONS: frozenset[str] = \
        frozenset([
            'exception',
            'std::exception',
        ])

    def process_tokens(self, tokens: list[Token]) -> None:
        """Report 'catch (...)' and catches of overly-broad exception types."""
        for token in [t for t in tokens if t.str == 'catch']:
            paren: Token | None = token.next
            if paren is None or paren.str != '(':
                continue

            # checks for violation: catch-all ellipsis
            first_param: Token | None = paren.next
            if first_param is not None and first_param.str == '...':
                self.report_error(token, Messages.get(self._MSG))
                continue
            # checks for violation: broad exception type in the parameter list
            if next_sibling(paren.next, lambda t: t.str in self._ILLEGAL_EXCEPTIONS):
                self.report_error(token, Messages.get(self._MSG))
def process_tokens(self, tokens: list[Token]) -> None:
    """Report catch-all and overly-broad catch clauses."""
    for token in [t for t in tokens if t.str == 'catch']:
        next_token: Token | None = token.next
        if next_token is None or \
                next_token.str != '(':
            continue

        # checks for violation: 'catch (...)'
        ellipses_token: Token | None = next_token.next
        if ellipses_token is not None and \
                ellipses_token.str == '...':
            self.report_error(token, Messages.get(self._MSG))
            continue
        # checks for violation: broad exception type in the parameter list
        if not next_sibling(next_token.next, lambda t: t.str in self._ILLEGAL_EXCEPTIONS):
            continue
        self.report_error(token, Messages.get(self._MSG))
class IllegalThrowChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#illegal-throw"""
    ID: str = 'illegal-throw'
    _MSG: str = 'illegal.throw'

    _BROAD_EXCEPTIONS: frozenset[str] = \
        frozenset([
            'exception',
            'std::exception',
        ])

    def process_tokens(self, tokens: list[Token]) -> None:
        """Report 'throw' statements that raise an overly-broad exception type."""
        for token in [t for t in tokens if t.str == 'throw']:
            # scan the statement for either a broad exception name or its end
            hit: Token | None = next_sibling(
                token.next,
                lambda t: t.str in self._BROAD_EXCEPTIONS or t.str == ';',
            )
            if hit is not None and hit.str in self._BROAD_EXCEPTIONS:
                self.report_error(hit, Messages.get(self._MSG))
def process_tokens(self, tokens: list[Token]) -> None:
    """Report 'throw' of overly-broad exception types."""
    # checks for violation
    for token in [t for t in tokens if t.str == 'throw']:
        # scan to either a broad exception name or the end of the statement
        target: Token | None = \
            next_sibling(
                token.next,
                lambda t: t.str in self._BROAD_EXCEPTIONS or t.str == ';',
            )
        if target is None or \
                target.str not in self._BROAD_EXCEPTIONS:
            continue
        self.report_error(target, Messages.get(self._MSG))
class IllegalVariableNameChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#illegal-variable-name"""
    ID: str = 'illegal-variable-name'
    _MSG: str = 'illegal.variable.name'
    ARGS: list[str] = [ILLEGAL_VARIABLE_NAMES_OPTION]

    def __init__(self) -> None:
        super().__init__()
        # defaults, replaced wholesale by before_run
        self._illegal_variable_names: set[str] = \
            {'integer', 'string', 'integers', 'strings'}
        # variableIds already reported, to avoid duplicate diagnostics
        self._reported_variables: set[str] = set()

    def before_run(self, args: dict[str, str]) -> None:
        self._illegal_variable_names = \
            set(args[ILLEGAL_VARIABLE_NAMES_OPTION].split(','))

    def process_tokens(self, tokens: list[Token]) -> None:
        """Report variables whose names are on the configured blocklist."""
        for token in [t for t in tokens if t.variable]:
            if token.str not in self._illegal_variable_names:
                continue
            variable_id: str = token.variableId
            # report each distinct variable at most once
            if variable_id not in self._reported_variables:
                self._reported_variables.add(variable_id)
                self.report_error(token, Messages.get(self._MSG))
def process_tokens(self, tokens: list[Token]) -> None:
    """Report variables whose names are on the blocklist, once per variable."""
    # checks for violation
    for token in [t for t in tokens if t.variable]:
        if token.str not in self._illegal_variable_names:
            continue
        token_id: str = token.variableId
        # report each distinct variable at most once
        if token_id in self._reported_variables:
            continue
        self._reported_variables.add(token_id)
        self.report_error(token, Messages.get(self._MSG))
class ImportOrderChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#import-order"""
    ID: str = 'import-order'
    _MSG_SORT: str = 'import.order.sort'
    _MSG_JOIN: str = 'import.order.join'

    _INCLUDE_REGEX: Pattern = re(r'#\s*include\s*([<"])(.+?)([>"])')

    def check_file(self, token: Token, content: str) -> None:
        """Report #include lines that are unsorted, misgrouped, or separated."""
        prev_is_quoted: bool | None = None
        prev_lineno: int | None = None
        prev_path: str | None = None

        for lineno, line in enumerate(content.splitlines(), 1):
            # distinguish between bracket and quote imports
            include: Match | None = self._INCLUDE_REGEX.search(line.strip())
            if include is None:
                continue
            is_quoted: bool = include.group(1) == '"'
            path: str = include.group(2).strip()

            if prev_lineno is not None:
                if not is_quoted and prev_is_quoted:
                    # bracket include after a quote include: wrong group order
                    self.report_error(
                        token,
                        Messages.get(self._MSG_SORT, path, prev_path),
                        lineno,
                    )
                elif is_quoted == prev_is_quoted:
                    # within a group: alphabetical order is required
                    if path < prev_path:
                        self.report_error(
                            token,
                            Messages.get(self._MSG_SORT, path, prev_path),
                            lineno,
                        )
                    # within a group: includes must be on consecutive lines
                    if lineno != prev_lineno + 1:
                        self.report_error(
                            token,
                            Messages.get(self._MSG_JOIN, path),
                            lineno,
                        )

            prev_is_quoted = is_quoted
            prev_lineno = lineno
            prev_path = path
def check_file(self, token: Token, content: str) -> None:
    """Report #includes that are out of order, misgrouped, or separated."""
    prev_is_quoted: bool | None = None
    prev_lineno: int | None = None
    prev_path: str | None = None

    for lineno, line in enumerate(content.splitlines(), 1):
        # distinguish between bracket and quote imports
        match: Match | None = self._INCLUDE_REGEX.search(line.strip())
        if match is None:
            continue
        is_quoted: bool = match.group(1) == '"'
        path: str = match.group(2).strip()

        # checks for violation
        if prev_lineno is not None:
            # bracket include after a quote include: wrong group order
            if not is_quoted and \
                    prev_is_quoted:
                self.report_error(
                    token,
                    Messages.get(self._MSG_SORT, path, prev_path),
                    lineno,
                )
            elif is_quoted == prev_is_quoted:
                # within a group: alphabetical order required
                if path < prev_path:
                    self.report_error(
                        token,
                        Messages.get(self._MSG_SORT, path, prev_path),
                        lineno,
                    )
                # within a group: no intervening lines allowed
                if lineno != prev_lineno + 1:
                    self.report_error(
                        token,
                        Messages.get(self._MSG_JOIN, path),
                        lineno,
                    )

        prev_is_quoted = is_quoted
        prev_lineno = lineno
        prev_path = path
class IndentStyleChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#indent-style"""
    ID: str = 'indent-style'
    _MSG: str = 'indent.style'
    ARGS: list[str] = [INDENT_STYLE_OPTION]

    def __init__(self) -> None:
        super().__init__()
        # default indent width, overridden by before_run
        self._indent_size: int = 4

    def before_run(self, args: dict[str, str]) -> None:
        self._indent_size = int(args[INDENT_STYLE_OPTION])

    def process_tokens(self, tokens: list[Token]) -> None:
        """Report lines whose leading spaces are not a multiple of the indent size."""
        seen: dict[str, set[int]] = {}
        file_lines: dict[str, list[str]] = {}
        for token in [t for t in tokens if t.file and t.linenr]:
            source_file: str | None = token.file
            linenr: int = token.linenr

            if source_file is None:
                continue
            # each physical line is checked once per file
            checked: set[int] = seen.setdefault(source_file, set())
            if linenr in checked:
                continue
            # lazily read and cache the source file
            if source_file not in file_lines:
                with open(source_file, 'r', encoding='UTF-8') as handle:
                    file_lines[source_file] = handle.readlines()

            checked.add(linenr)
            line: str = file_lines[source_file][linenr - 1]
            body: str = line.lstrip(' ')
            # blank lines and '*' block-comment continuations are exempt
            if not body.strip() or body.startswith('*'):
                continue
            if (len(line) - len(body)) % self._indent_size == 0:
                continue
            self.report_error(token, Messages.get(self._MSG, self._indent_size))
def process_tokens(self, tokens: list[Token]) -> None:
    """Report lines whose leading-space count is not a multiple of the indent size."""
    processed_lines: dict[str, set[int]] = {}
    all_lines: dict[str, list[str]] = {}
    for token in [t for t in tokens if t.file and t.linenr]:
        token_file: str | None = token.file
        token_linenr: int = token.linenr

        if token_file is None:
            continue
        if token_file not in processed_lines:
            processed_lines[token_file] = set()
        # each physical line is checked once per file
        if token_linenr in processed_lines[token_file]:
            continue
        # lazily read and cache the source file
        if token_file not in all_lines:
            with open(token_file, 'r', encoding='UTF-8') as f:
                all_lines[token_file] = f.readlines()

        # checks for violation
        processed_lines[token_file].add(token_linenr)
        line: str = all_lines[token_file][token_linenr - 1]
        line_stripped: str = line.lstrip(' ')
        # blank lines and '*' block-comment continuations are exempt
        if not line_stripped.strip() or \
                line_stripped.startswith('*') or \
                (len(line) - len(line_stripped)) % self._indent_size == 0:
            continue
        self.report_error(token, Messages.get(self._MSG, self._indent_size))
class InnerClassPositionChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#inner-class-position"""
    ID: str = 'inner-class-position'
    _MSG: str = 'inner.class.position'

    _TARGET_SCOPES: frozenset[str] = frozenset(['Class', 'Struct'])
    _TARGET_TOKENS: frozenset[str] = frozenset(['class', 'struct'])

    def process_tokens(self, tokens: list[Token]) -> None:
        """Report a class member declared after an inner class/struct."""
        for token in [
            t for t in tokens
            if t.scope and
            t.scope.type in self._TARGET_SCOPES and
            t is t.scope.bodyStart
        ]:
            # consider only inner class
            current_class_scope: Scope = token.scope
            has_seen_inner_class: bool = False
            curr_token: Token | None = token.next
            while curr_token is not None and \
                    curr_token is not current_class_scope.bodyEnd:
                # BUGFIX: the original 'continue' on a non-Token value never
                # advanced curr_token and could loop forever; bail out instead
                if not isinstance(curr_token, Token):
                    break
                if curr_token.str in self._TARGET_TOKENS and \
                        curr_token.next:
                    # inner class/struct declared directly in this scope
                    if curr_token.next.typeScope and \
                            curr_token.next.typeScope.nestedIn is current_class_scope:
                        has_seen_inner_class = True
                # checks for violation: first member after the inner class
                if has_seen_inner_class and \
                        self._is_member(curr_token, current_class_scope):
                    self.report_error(curr_token, Messages.get(self._MSG))
                    break
                curr_token = curr_token.next

    @staticmethod
    def _is_member(token: Token, scope: Scope) -> bool:
        """Return True when token defines a function or variable directly in scope."""
        if token.scope is not scope:
            return False
        if token.function and \
                token is token.function.tokenDef:
            return True
        return bool(
            token.variable and
            token is token.variable.nameToken
        )
def process_tokens(self, tokens: list[Token]) -> None:
    """Report members declared after an inner class within Class/Struct scopes."""
    for token in [
        t for t in tokens
        if t.scope and
        t.scope.type in self._TARGET_SCOPES and
        t is t.scope.bodyStart
    ]:
        # consider only inner class
        current_class_scope: Scope = token.scope
        has_seen_inner_class: bool = False
        curr_token: Token | None = token.next
        continue_outer: bool = False
        while curr_token is not None and \
                curr_token is not current_class_scope.bodyEnd:
            # NOTE(review): this 'continue' does not advance curr_token and
            # would spin forever if ever taken — confirm and fix upstream
            if not isinstance(curr_token, Token):
                continue
            if curr_token.str in self._TARGET_TOKENS and \
                    curr_token.next:
                # inner class/struct declared directly in this scope
                if curr_token.next.typeScope and \
                        curr_token.next.typeScope.nestedIn is current_class_scope:
                    has_seen_inner_class = True
            if has_seen_inner_class:
                # checks for violation
                if self._is_member(curr_token, current_class_scope):
                    self.report_error(curr_token, Messages.get(self._MSG))
                    continue_outer = True
                    break
            curr_token = curr_token.next
        if continue_outer:
            continue
class LineLengthChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#line-length"""
    ID: str = 'line-length'
    _MSG: str = 'line.length'
    ARGS: list[str] = [MAX_LINE_LENGTH_OPTION]

    def __init__(self) -> None:
        super().__init__()
        # default limit, overridden by before_run
        self._max_line_length: int = 100

    def before_run(self, args: dict[str, str]) -> None:
        self._max_line_length = int(args[MAX_LINE_LENGTH_OPTION])

    def process_tokens(self, tokens: list[Token]) -> None:
        """Report source lines longer than the configured maximum."""
        seen: dict[str, set[int]] = {}
        file_lines: dict[str, list[str]] = {}
        for token in [t for t in tokens if t.file and t.linenr]:
            source_file: str = token.file
            linenr: int = token.linenr

            # each physical line is checked once per file
            checked: set[int] = seen.setdefault(source_file, set())
            if linenr in checked:
                continue
            # lazily read and cache the source file
            if source_file not in file_lines:
                with open(source_file, 'r', encoding='UTF-8') as handle:
                    file_lines[source_file] = handle.readlines()
            checked.add(linenr)
            # trailing newline characters do not count toward the limit
            line: str = file_lines[source_file][linenr - 1].rstrip('\r\n')
            if len(line) > self._max_line_length:
                self.report_error(token, Messages.get(self._MSG, self._max_line_length))
def process_tokens(self, tokens: list[Token]) -> None:
    """Report source lines exceeding the configured maximum length."""
    # checks for violation
    processed_lines: dict[str, set[int]] = {}
    cache: dict[str, list[str]] = {}
    for token in [t for t in tokens if t.file and t.linenr]:
        token_file: str = token.file
        token_linenr: int = token.linenr

        # each physical line is checked once per file
        if token_file not in processed_lines:
            processed_lines[token_file] = set()
        if token_linenr in processed_lines[token_file]:
            continue
        # lazily read and cache the source file
        if token_file not in cache:
            with open(token_file, 'r', encoding='UTF-8') as f:
                cache[token_file] = f.readlines()
        processed_lines[token_file].add(token_linenr)
        # trailing newline characters do not count toward the limit
        if len(cache[token_file][token_linenr - 1].rstrip('\r\n')) <= self._max_line_length:
            continue
        self.report_error(token, Messages.get(self._MSG, self._max_line_length))
class LonelyCaseChecker(RulebookChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#lonely-case"""
    ID: str = 'lonely-case'
    _MSG: str = 'lonely.case'

    def get_scopeset(self) -> set[str]:
        return {'Switch'}

    def visit_scope(self, scope: Scope) -> None:
        """Report switch scopes containing at most one 'case' label."""
        # checks for violation
        case_count: int = 0
        curr_token: Token | None = scope.bodyStart
        while curr_token is not None and \
                curr_token is not scope.bodyEnd:
            # BUGFIX: the original 'continue' on a non-Token value never
            # advanced curr_token and could loop forever; bail out instead
            if not isinstance(curr_token, Token):
                break
            if curr_token.str == 'case':
                case_count += 1
            curr_token = curr_token.next
        if case_count > 1:
            return
        # point the diagnostic at the 'switch' keyword itself
        switch_token: Token | None = prev_sibling(scope.bodyStart, lambda t: t.str == 'switch')
        if switch_token is None:
            return
        self.report_error(switch_token, Messages.get(self._MSG))
def
visit_scope(self, scope: cppcheck.Cppcheck.addons.cppcheckdata.Scope) -> None:
20 def visit_scope(self, scope: Scope) -> None: 21 # checks for violation 22 case_count: int = 0 23 curr_token: Token | None = scope.bodyStart 24 while curr_token is not None and \ 25 curr_token is not scope.bodyEnd: 26 if not isinstance(curr_token, Token): 27 continue 28 if curr_token.str == 'case': 29 case_count += 1 30 curr_token = curr_token.next 31 if case_count > 1: 32 return 33 switch_token: Token | None = prev_sibling(scope.bodyStart, lambda t: t.str == 'switch') 34 if switch_token is None: 35 return 36 self.report_error(switch_token, Messages.get(self._MSG))
class LonelyIfChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#lonely-if

    Reports an ``else { ... }`` block whose sole statement is an ``if``,
    which should be collapsed into ``else if``.
    """
    ID: str = 'lonely-if'
    _MSG: str = 'lonely.if'

    def process_tokens(self, tokens: list[Token]) -> None:
        for token in tokens:
            # Only `else` followed by an opening brace is a candidate.
            if token.str != 'else' or \
                    token.next is None or \
                    token.next.str != '{':
                continue
            inner_if: Token | None = self._sole_if_in_block(token.next)
            if inner_if is None:
                # FIX: was `return`, which aborted the scan at the first
                # else-block without a lonely if; later blocks in the same
                # file were never checked.
                continue
            self.report_error(inner_if, Messages.get(self._MSG))

    @staticmethod
    def _skip_statement(token: Token | None) -> Token | None:
        """Return the token just past one statement, or None if unknown."""
        if token is None:
            return None
        if token.str == '{':
            # A brace block ends at its linked closing brace.
            return token.link.next if token.link else None
        if token.str == 'if':
            paren: Token | None = token.next
            if paren is None or paren.str != '(':
                return None
            # FIX: guard an unlinked '(' (macro/truncated stream) before
            # dereferencing; `paren.link.next` previously raised
            # AttributeError when `link` was None.
            if paren.link is None:
                return None
            token = LonelyIfChecker._skip_statement(paren.link.next)
            if token and token.str == 'else':
                token = LonelyIfChecker._skip_statement(token.next)
            return token
        # A simple statement ends at the next semicolon.
        while token is not None and token.str != ';':
            token = token.next
        return token.next if token else None

    @staticmethod
    def _sole_if_in_block(open_brace: Token) -> Token | None:
        """Return the block's `if` token if it is the only statement inside."""
        first: Token | None = open_brace.next
        if first is None or first.str != 'if':
            return None
        # An `if` on the same line as the brace is a deliberate one-liner.
        if open_brace.linenr == first.linenr:
            return None
        return first if LonelyIfChecker._skip_statement(first) is open_brace.link else None
class LowercaseFChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#lowercase-f"""
    ID: str = 'lowercase-f'
    _MSG: str = 'lowercase.f'

    def process_tokens(self, tokens: list[Token]) -> None:
        # Flag float literals written with an uppercase `F` suffix.
        for token in tokens:
            if not (token.isNumber or token.isName):
                continue
            text: str = token.str
            if 'F' not in text:
                continue
            # After dropping the dot and any F/f suffix characters, what is
            # left must be purely numeric for this to be a float literal.
            stripped: str = text.replace('.', '').replace('F', '').replace('f', '')
            if stripped.isnumeric():
                self.report_error(token, Messages.get(self._MSG))
class LowercaseHexadecimalChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#lowercase-hexadecimal

    Reports hexadecimal literals containing uppercase letters, suggesting
    the all-lowercase spelling.
    """
    ID: str = 'lowercase-hexadecimal'
    _MSG: str = 'lowercase.hexadecimal'

    def process_tokens(self, tokens: list[Token]) -> None:
        # checks for violation
        for token in [t for t in tokens if t.isNumber]:
            value: str = token.str
            # FIX: both guards below previously used `return`, so the first
            # non-hexadecimal (or already-lowercase) number literal aborted
            # the scan of every remaining token in the file.
            if not value.lower().startswith('0x'):
                continue
            value_replacement: str = value.lower()
            if value == value_replacement:
                continue
            self.report_error(token, Messages.get(self._MSG, value_replacement))
class MeaninglessWordChecker(RulebookChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#meaningless-word"""
    ID: str = 'meaningless-word'
    _MSG: str = 'meaningless.word'
    ARGS: list[str] = [MEANINGLESS_WORDS_OPTION]

    # Splits a PascalCase/camelCase identifier into its word parts.
    _TITLE_CASE_REGEX: Pattern = \
        re(
            r'((^[a-z]+)|([0-9]+)|([A-Z]{1}[a-z]+)|'
            r'([A-Z]+(?=([A-Z][a-z])|($)|([0-9]))))',
        )

    def __init__(self) -> None:
        super().__init__()
        # Default banned suffix words; overridden from the option.
        self._words: set[str] = {'Util', 'Utility', 'Helper', 'Manager', 'Wrapper'}

    def before_run(self, args: dict[str, str]) -> None:
        self._words = set(args[MEANINGLESS_WORDS_OPTION].split(','))

    def get_scopeset(self) -> set[str]:
        return {'Class', 'Struct', 'Union', 'Enum'}

    def visit_scope(self, scope: Scope) -> None:
        # Flag type names whose final word carries no meaning.
        class_name: str | None = scope.className
        if class_name is None:
            return
        parts: list[str] = [m[0] for m in self._TITLE_CASE_REGEX.findall(class_name)]
        if not parts:
            return
        last_word: str = parts[-1]
        if last_word not in self._words:
            return
        # Prefer anchoring the report on the name token when it is found.
        name_token: Token | None = prev_sibling(scope.bodyStart, lambda t: t.str == class_name)
        anchor: Token = name_token if name_token is not None else scope.bodyStart
        self.report_error(anchor, Messages.get(self._MSG, last_word))
class MemberOrderChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#member-order

    Reports class/struct members declared out of the configured order
    (property, constructor, function, static by default).
    """
    ID: str = 'member-order'
    _MSG: str = 'member.order'
    ARGS: list[str] = [MEMBER_ORDER_OPTION]

    _TARGET_TOKENS: frozenset[str] = frozenset(['Class', 'Struct'])

    def __init__(self) -> None:
        super().__init__()
        # Default ordering; positions are recomputed in before_run.
        self._member_order: list[str] = [
            'property',
            'constructor',
            'function',
            'static',
        ]
        self._property_position: int = 0
        self._constructor_position: int = 1
        self._function_position: int = 2
        self._static_position: int = 3

    def before_run(self, args: dict[str, str]) -> None:
        self._member_order = args[MEMBER_ORDER_OPTION].split(',')
        self._property_position = self._member_order.index('property')
        self._constructor_position = self._member_order.index('constructor')
        self._function_position = self._member_order.index('function')
        self._static_position = self._member_order.index('static')

    def process_tokens(self, tokens: list[Token]) -> None:
        # checks for violation, walking each class/struct body once
        for token in [
            t for t in tokens
            if t.scope and
            t.scope.type in self._TARGET_TOKENS and
            t is t.scope.bodyStart
        ]:
            prev_weight: int | None = None
            prev_name: str | None = None
            curr_token: Token | None = token.next
            while curr_token is not None and \
                    curr_token is not token.scope.bodyEnd:
                # FIX: removed `if not isinstance(curr_token, Token): continue`
                # — the `continue` skipped the advance at the loop's end, so
                # the guard would have spun forever had it ever fired.
                info: tuple[int, str] | None = self._get_member_info(curr_token, token.scope)
                if info is not None:
                    curr_weight, curr_name = info
                    # A smaller weight after a larger one is out of order.
                    if prev_weight is not None and \
                            curr_weight < prev_weight:
                        self.report_error(
                            curr_token,
                            Messages.get(self._MSG, curr_name, prev_name),
                        )
                    prev_weight = curr_weight
                    prev_name = curr_name
                curr_token = curr_token.next

    def _get_member_info(self, token: Token, scope: Scope) -> tuple[int, str] | None:
        """Classify a member token, returning (order weight, display name).

        Returns None for tokens that do not introduce a member of `scope`.
        """
        if token.scope is not scope:
            return None
        if token.function is not None:
            func: Function = token.function
            # Only the definition token itself counts, not later references.
            if token is not func.tokenDef:
                return None
            if func.isStatic:
                return self._static_position, 'static member'
            if scope.className == func.name:
                return self._constructor_position, 'constructor'
            return self._function_position, 'function'
        if token.variable is not None:
            var: Variable = token.variable
            if token is not var.nameToken or \
                    var.isArgument:
                return None
            if var.isStatic:
                return self._static_position, 'static member'
            return self._property_position, 'property'
        return None
class MemberSeparatorChecker(RulebookChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#member-separator

    Reports consecutive class/struct members of different kinds that are
    not separated by a blank line.
    """
    ID: str = 'member-separator'
    _MSG: str = 'member.separator'

    _OPENING_TOKENS: frozenset[str] = frozenset(['{', ';'])

    def get_scopeset(self) -> set[str]:
        return {'Class', 'Struct'}

    def visit_scope(self, scope: Scope) -> None:
        # collect (start token, end token, is-variable) per member in order
        members: list[tuple[Token, Token, bool]] = []
        body_start: Token | None = scope.bodyStart
        body_end: Token | None = scope.bodyEnd
        if body_start is None or \
                body_end is None:
            return
        curr_token: Token | None = body_start.next
        while curr_token is not None and \
                curr_token is not body_end:
            # FIX: removed `if not isinstance(curr_token, Token): continue` —
            # the `continue` skipped the advance at the loop's end, so the
            # guard would have spun forever had it ever fired.
            if curr_token.scope is scope and \
                    (curr_token.variable or curr_token.function):
                is_var: bool = curr_token.variable is not None
                start_token: Token = curr_token
                end_token: Token = curr_token
                if not is_var and \
                        curr_token.function and \
                        curr_token.function.tokenDef:
                    # A function ends at its body's closing brace, or at ';'
                    # for a declaration without a body.
                    search = \
                        next_sibling(
                            curr_token.function.tokenDef,
                            lambda t: t.str in self._OPENING_TOKENS or t is body_end,
                        )
                    if search and \
                            search.str == '{' and \
                            search.link:
                        end_token = search.link
                    elif search and \
                            search.str == ';':
                        end_token = search
                else:
                    # A variable ends at ';' or where the scope changes.
                    search = \
                        next_sibling(
                            curr_token,
                            lambda t: \
                                t.str == ';' or \
                                not t.next or \
                                t.next.scope is not scope,
                        )
                    if search:
                        end_token = search
                members.append((start_token, end_token, is_var))
                curr_token = end_token.next
                continue
            curr_token = curr_token.next

        # checks for violation: adjacent variables may sit together, any
        # other pairing needs at least one blank line in between
        for i in range(1, len(members)):
            prev_start, prev_end, prev_is_var = members[i - 1]
            current_start, _, curr_is_var = members[i]
            if prev_is_var and \
                    curr_is_var:
                continue
            if current_start.linenr - prev_end.linenr >= 2:
                continue
            msg_arg: str = 'property' if prev_is_var else 'function'
            if not prev_is_var and \
                    prev_start.str == scope.className:
                msg_arg = 'constructor'
            self.report_error(prev_end, Messages.get(self._MSG, msg_arg))
class OperatorWrapChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#operator-wrap"""
    ID: str = 'operator-wrap'
    _MSG_MISSING: str = 'operator.wrap.missing'
    _MSG_UNEXPECTED: str = 'operator.wrap.unexpected'

    _IGNORE_TOKENS: frozenset[str] = \
        frozenset(['(', ')', '[', ']', '{', '}', ',', '.', '::', '?', ':'])

    def process_tokens(self, tokens: list[Token]) -> None:
        for token in tokens:
            # Binary, non-assignment operators only.
            if not token.isOp or token.isAssignmentOp:
                continue
            if token.astOperand1 is None or token.astOperand2 is None:
                continue
            if token.str in self._IGNORE_TOKENS:
                continue

            # Operator placed at the start of a wrapped line is unexpected.
            before: Token | None = token.previous
            if before is not None and token.linenr > before.linenr:
                self.report_error(token, Messages.get(self._MSG_UNEXPECTED, token.str))
                continue

            after: Token | None = token.next
            if after is None or after.str in {'{', '['}:
                continue

            # Climb to the outermost operator node of this expression, then
            # find the leftmost and rightmost leaves of its AST.
            root: Token | None = parent(token, lambda t: t.isOp and not t.isAssignmentOp)
            left: Token | None = root
            while left is not None and left.astOperand1 is not None:
                left = left.astOperand1
            right: Token | None = root
            while right is not None and right.astOperand2 is not None:
                right = right.astOperand2

            # A multiline expression whose operator is not followed by a
            # line break is missing a wrap.
            if not isinstance(left, Token) or not isinstance(right, Token):
                continue
            if right.linenr <= left.linenr or after.linenr != token.linenr:
                continue
            self.report_error(token, Messages.get(self._MSG_MISSING, token.str))
class PackageNameChecker(RulebookChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#package-name"""
    ID: str = 'package-name'
    _MSG: str = 'package.name'

    # Marks the positions where an underscore goes when lowering CamelCase.
    _SNAKE_CASE_REGEX: Pattern = re(r'(?<!^)(?=[A-Z][a-z])|(?<=[a-z])(?=[A-Z])')

    def get_scopeset(self) -> set[str]:
        return {'Namespace'}

    def visit_scope(self, scope: Scope) -> None:
        # Namespace names containing uppercase letters should be snake_case.
        class_name: str | None = scope.className
        if class_name is None:
            return
        if not any(c.isupper() for c in class_name):
            return
        # Build the suggested name, collapsing any run of underscores.
        suggestion: str = sub(r'_+', '_', self._SNAKE_CASE_REGEX.sub('_', class_name).lower())
        name_token: Token | None = prev_sibling(scope.bodyStart, lambda t: t.str == class_name)
        anchor: Token = name_token if name_token is not None else scope.bodyStart
        self.report_error(anchor, Messages.get(self._MSG, suggestion))
class ParameterWrapChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#parameter-wrap

    Once an argument list wraps at all, every parameter must start on its
    own line.
    """
    ID: str = 'parameter-wrap'
    _MSG: str = 'parameter.wrap'

    def process_tokens(self, tokens: list[Token]) -> None:
        for token in [t for t in tokens if t.str == '(']:
            # collect the first token of every top-level parameter
            params: list[Token] = []
            curr_token: Token | None = token.next
            depth: int = 1
            while curr_token is not None and \
                    depth > 0:
                if curr_token.str == '(':
                    depth += 1
                elif curr_token.str == ')':
                    depth -= 1
                if depth == 1 and \
                        curr_token.str == ',':
                    # FIX: guard against a trailing comma or a truncated
                    # token stream; appending None here previously crashed
                    # on the `.linenr` accesses below.
                    if curr_token.next is not None:
                        params.append(curr_token.next)
                elif depth == 1 and \
                        not params and \
                        curr_token.str != ')':
                    params.append(curr_token)
                curr_token = curr_token.next
            if not params:
                continue

            # checks for violation: if the list spans lines, no two
            # parameters may share a line
            last: Token = params[-1]
            if token.linenr == last.linenr:
                continue
            for i in range(1, len(params)):
                prev: Token = params[i - 1]
                curr_param: Token = params[i]
                if curr_param.linenr != prev.linenr:
                    continue
                self.report_error(curr_param, Messages.get(self._MSG))
class ParenthesesClipChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#parentheses-clip"""
    ID: str = 'parentheses-clip'
    _MSG: str = 'parentheses.clip'

    _PARENTHESES: dict[str, str] = {
        '{': '}',
        '(': ')',
        '[': ']',
    }
    _MULTI_BLOCKS: frozenset[str] = \
        frozenset(['if', 'else', 'while', 'for', 'switch', 'try', 'catch', 'do'])
    _MULTI_BLOCK_STOPS: frozenset[str] = \
        frozenset([';', '{', '}'])

    def __init__(self) -> None:
        super().__init__()
        # De-duplicates reports for the same (file, line, column, message).
        self._reported_errors: set[tuple[str, int, int, str]] = set()

    def process_tokens(self, tokens: list[Token]) -> None:
        # Flag empty bracket pairs that are split apart or padded.
        for token in tokens:
            opener: str = token.str
            if opener not in self._PARENTHESES or \
                    token.isExpandedMacro or \
                    (opener == '(' and token.isRemovedVoidParameter):
                continue
            closer: Token | None = token.link
            # Only an immediately-closed (empty) pair is a candidate.
            if closer is None or \
                    token.next != closer:
                continue
            # Adjacent on the same line: already clipped, nothing to report.
            if token.linenr == closer.linenr and \
                    token.column + len(opener) == closer.column:
                continue
            if opener == '(':
                if token.astParentId is not None:
                    continue
            elif opener == '{':
                if token.astParentId is not None and \
                        token.astParentId != '0':
                    continue
            # Walk backwards: an empty body belonging to a control
            # statement is a deliberate block, not a clip candidate.
            cursor: Token | None = token.previous
            is_control: bool = False
            while cursor is not None:
                if cursor.str in self._MULTI_BLOCKS:
                    is_control = True
                    break
                if cursor.str in self._MULTI_BLOCK_STOPS:
                    break
                cursor = cursor.previous
            if is_control:
                continue
            self._report_error_once(
                token,
                Messages.get(self._MSG, opener + self._PARENTHESES[opener]),
            )

    def _report_error_once(self, token: Token, message: str) -> None:
        """Report, suppressing duplicates at the same location and text."""
        key: tuple[str, int, int, str] = (token.file, token.linenr, token.column, message)
        if key not in self._reported_errors:
            self._reported_errors.add(key)
            self.report_error(token, message)
class ParenthesesTrimChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#parentheses-trim

    Reports blank lines directly after an opening parenthesis or directly
    before a closing one.
    """
    ID: str = 'parentheses-trim'
    _MSG_FIRST: str = 'parentheses.trim.first'
    _MSG_LAST: str = 'parentheses.trim.last'

    _OPENING_PARENTHESES: frozenset[str] = frozenset(['(', '[', '{', '<'])
    _CLOSING_PARENTHESES: frozenset[str] = frozenset([')', ']', '}', '>'])

    def process_tokens(self, tokens: list[Token]) -> None:
        # FIX: the source file was previously re-opened and re-read for
        # EVERY token; cache each file's lines once instead (the same
        # approach LineLengthChecker uses).
        cache: dict[str, list[str]] = {}
        for token in [t for t in tokens if t.file is not None]:
            if token.file not in cache:
                with open(token.file, 'r', encoding='UTF-8') as f:
                    cache[token.file] = f.readlines()
            lines: list[str] = cache[token.file]

            # blank line(s) right after an opening parenthesis
            if token.str in self._OPENING_PARENTHESES:
                next_token: Token | None = token.next
                if next_token is None or \
                        next_token.linenr <= token.linenr + 1:
                    continue
                if self._has_content_between(lines, token, next_token):
                    continue
                self.report_error(
                    token,
                    Messages.get(self._MSG_FIRST, token.str),
                    token.linenr + 1,
                )

            # blank line(s) right before a closing parenthesis
            if token.str not in self._CLOSING_PARENTHESES:
                continue
            prev_token: Token | None = token.previous
            if prev_token is None or \
                    token.linenr <= prev_token.linenr + 1:
                continue
            if self._has_content_between(lines, prev_token, token):
                continue
            self.report_error(
                token,
                Messages.get(self._MSG_LAST, token.str),
                token.linenr - 1,
            )

    @staticmethod
    def _has_content_between(lines2: list[str], start: Token, end: Token) -> bool:
        """Return True if any line strictly between the tokens is non-blank."""
        return any(lines2[i].strip() for i in range(start.linenr, end.linenr - 1))
class RedundantDefaultChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#redundant-default

    Reports a ``default`` label in a ``switch`` whose every ``case`` ends
    in a jump statement, making the label reachable only as fall-through.
    """
    ID: str = 'redundant-default'
    _MSG: str = 'redundant.default'

    _BREAK_STATEMENTS: frozenset[str] = frozenset(['return', 'continue', 'throw', 'goto'])

    def process_tokens(self, tokens: list[Token]) -> None:
        for token in [t for t in tokens if t.str == 'switch']:
            # find the opening brace of the switch
            l_brace: Token | None = token.next
            if l_brace is not None and \
                    l_brace.str == '(':
                # FIX: guard an unlinked '(' (truncated stream) before
                # dereferencing; `.link.next` previously raised
                # AttributeError when `link` was None.
                if l_brace.link is None:
                    continue
                l_brace = l_brace.link.next
            if l_brace is None or \
                    l_brace.str != '{':
                continue

            # find default
            default_token, cases = self._get_default(l_brace.next, l_brace.link)
            if default_token is None or \
                    not cases:
                continue

            # checks for violation: every case must contain a jump
            continue_outer: bool = False
            for i, case_colon in enumerate(cases):
                limit: Token = cases[i + 1] if i + 1 < len(cases) else default_token
                has_jump: bool = False
                search: Token | None = case_colon.next
                while search is not None and \
                        search is not limit:
                    # FIX: removed `if not isinstance(search, Token):
                    # continue` — the `continue` skipped the advance and
                    # would have looped forever had it ever fired.
                    if search.str in self._BREAK_STATEMENTS:
                        has_jump = True
                        break
                    search = search.next
                if not has_jump:
                    continue_outer = True
                    break
            if continue_outer:
                continue
            self.report_error(default_token, Messages.get(self._MSG))

    @staticmethod
    def _get_default(
        curr_token: Token | None,
        r_brace: Token | None,
    ) -> tuple[Token | None, list[Token]]:
        """Return the default label's colon token and each case's colon."""
        cases: list[Token] = []
        default_token: Token | None = None
        while curr_token is not None and \
                curr_token is not r_brace:
            if curr_token.str == 'case':
                curr_token = next_sibling(curr_token, lambda t: t is r_brace or t.str == ':')
                if curr_token and \
                        curr_token.str == ':':
                    cases.append(curr_token)
                # FIX: next_sibling may return None; bail out instead of
                # crashing on `curr_token.next` below.
                if curr_token is None:
                    break
            elif curr_token.str == 'default':
                default_token = \
                    next_sibling(curr_token, lambda t: t is r_brace or t.str == ':')
            curr_token = curr_token.next
        return default_token, cases
class RedundantElseChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#redundant-else

    Flags an `else` whose preceding then-branch always exits via a jump
    statement (return/break/continue/throw/goto), making the `else` wrapper
    unnecessary.
    """
    ID: str = 'redundant-else'
    _MSG: str = 'redundant.else'

    # statements that unconditionally leave the current block
    _JUMP_TOKENS: frozenset[str] = frozenset(['return', 'break', 'continue', 'throw', 'goto'])
    # tokens at which the forward search for a trailing `else` stops
    _ELSE_SIBLING_TOKENS: frozenset[str] = frozenset(['else', ';'])

    def process_tokens(self, tokens: list[Token]) -> None:
        """Walk each `if` and its `else if` chain, reporting redundant `else`s."""
        for token in [t for t in tokens if t.str == 'if']:
            if_token: Token | None = token
            while if_token is not None:
                # skip single if (no `else` attached to this branch)
                else_token: Token | None = self._get_else_token(if_token)
                if else_token is None:
                    break

                # checks for violation: the then-branch must always jump
                then_token: Token | None = if_token.next
                if then_token is not None and \
                        then_token.str == '(':
                    # hop over the parenthesized condition to the branch body
                    then_token = then_token.link.next
                if not self._has_toplevel_jump(then_token):
                    break
                self.report_error(else_token, Messages.get(self._MSG))

                # continue down the `else if` chain, if any
                next_token: Token | None = else_token.next
                if next_token is not None and \
                        next_token.str == 'if':
                    if_token = next_token
                else:
                    if_token = None

    @staticmethod
    def _has_toplevel_jump(block_token: Token | None) -> bool:
        """Return True when the branch starting at `block_token` ends in a jump.

        For an unbraced branch this is whether its first token is a jump
        keyword; for a braced block only the top level of the body is scanned,
        nested blocks are skipped wholesale.
        """
        if block_token is None or block_token.str != '{':
            return block_token is not None and \
                block_token.str in RedundantElseChecker._JUMP_TOKENS
        tok: Token | None = block_token.next
        close: Token | None = block_token.link
        last_jump: bool = False
        while tok is not None and tok is not close:
            if tok.str in RedundantElseChecker._JUMP_TOKENS:
                last_jump = True
            elif tok.str == '{':
                last_jump = False
                tok = tok.link  # jump over nested block entirely
            elif tok.str == ';':
                if not last_jump:
                    # NOTE(review): this assignment is a no-op (`last_jump` is
                    # already False on this path); possibly the intent was to
                    # reset the flag after a non-jump statement — confirm
                    # against the rulebook test suite before changing.
                    last_jump = False  # keep last_jump state accurate per-statement
            if tok is not None:
                tok = tok.next
        return last_jump

    @staticmethod
    def _get_else_token(if_token: Token) -> Token | None:
        """Return the `else` keyword token paired with `if_token`, or None."""
        curr_token: Token | None = if_token.next
        # skip over the parenthesized condition
        if curr_token is not None and \
                curr_token.str == '(':
            curr_token = curr_token.link.next
        if curr_token is not None and \
                curr_token.str == '{':
            # braced then-branch: jump to the token after the closing brace
            curr_token = curr_token.link.next
        elif curr_token is not None:
            # unbraced then-branch: scan forward to `else` or the closing `;`
            curr_token = \
                next_sibling(
                    curr_token,
                    lambda t: t.str in RedundantElseChecker._ELSE_SIBLING_TOKENS,
                )
            # step over the statement terminator to see a following `else`
            if curr_token is not None and \
                    curr_token.str == ';':
                curr_token = curr_token.next
        return curr_token \
            if curr_token is not None and curr_token.str == 'else' \
            else None
def process_tokens(self, tokens: list[Token]) -> None:
    """Walk each `if`/`else if` chain and flag an `else` after a jumping branch."""
    for head in (t for t in tokens if t.str == 'if'):
        current: Token | None = head
        while current is not None:
            # a plain `if` without `else` ends the chain immediately
            else_kw = self._get_else_token(current)
            if else_kw is None:
                break

            # locate the then-branch body past the parenthesized condition
            body = current.next
            if body is not None and body.str == '(':
                body = body.link.next
            if not self._has_toplevel_jump(body):
                break
            self.report_error(else_kw, Messages.get(self._MSG))

            # advance to the next `else if` link, or stop
            follower = else_kw.next
            current = follower if follower is not None and follower.str == 'if' else None
class RedundantIfChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#redundant-if

    Reports `if (...) return true; else return false;` shaped constructs whose
    branches merely return boolean literals.
    """
    ID: str = 'redundant-if'
    _MSG: str = 'redundant.if'

    # the two boolean literal spellings recognized in a bare return
    _BOOLS: frozenset[str] = frozenset(['true', 'false'])

    def process_tokens(self, tokens: list[Token]) -> None:
        """Flag an `if` whose then (and optional else) branch returns a bool literal."""
        for token in [t for t in tokens if t.str == 'if']:
            paren: Token | None = token.next
            if paren is None or paren.str != '(':
                continue

            # the then-branch must be a bare boolean return
            then_value, after_then = \
                self._read_bool_return(paren.link.next if paren.link else None)
            if then_value is None or \
                    after_then is None:
                continue

            # checks for violation: the following (possibly else-wrapped)
            # statement must also be a bare boolean return
            else_value, _ = \
                self._read_bool_return(after_then.next) \
                if after_then.str == 'else' \
                else self._read_bool_return(after_then)
            if else_value is None:
                continue
            self.report_error(token, Messages.get(self._MSG, then_value, else_value))

    @staticmethod
    def _read_bool_return(token: Token | None) -> tuple[str | None, Token | None]:
        """Parse `return true;` / `return false;`, optionally wrapped in braces.

        Returns the boolean literal text and the token following the statement
        (past a closing `}` when braced), or `(None, None)` on a mismatch.
        """
        if token is None:
            return None, None
        braced: bool = token.str == '{'
        if braced:
            token = token.next
        if token is None or token.str != 'return':
            return None, None
        value: Token | None = token.next
        # consistency fix: use the class-level _BOOLS constant, which was
        # previously declared but bypassed by an inline ('true', 'false') tuple
        if value is None or value.str not in RedundantIfChecker._BOOLS:
            return None, None
        semi: Token | None = value.next
        if semi is None or semi.str != ';':
            return None, None
        after: Token | None = semi.next
        if braced:
            if after is None or after.str != '}':
                return None, None
            after = after.next
        return value.str, after
def process_tokens(self, tokens: list[Token]) -> None:
    """Report an `if` statement whose branches merely return boolean literals."""
    for if_token in filter(lambda t: t.str == 'if', tokens):
        condition = if_token.next
        if condition is None or condition.str != '(':
            continue

        # then-branch: must parse as a bare boolean return
        after_paren = condition.link.next if condition.link else None
        then_value, follow = self._read_bool_return(after_paren)
        if then_value is None or follow is None:
            continue

        # checks for violation: the trailing statement (else-wrapped or not)
        # must also be a bare boolean return
        start = follow.next if follow.str == 'else' else follow
        else_value, _ = self._read_bool_return(start)
        if else_value is None:
            continue
        self.report_error(if_token, Messages.get(self._MSG, then_value, else_value))
class TodoCommentChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#todo-comment

    Scans line and block comments for TODO/FIXME tags that are not fully
    uppercase or are not followed by whitespace.
    """
    ID: str = 'todo-comment'
    _MSG_KEYWORD: str = 'todo.comment.keyword'
    _MSG_SEPARATOR: str = 'todo.comment.separator'

    # tag spelled with any casing other than all-uppercase
    _KEYWORD_REGEX = regex(r'\b(?i:fixme|todo)(?<!FIXME|TODO)\b')
    # tag immediately followed by a non-whitespace character
    _SEPARATOR_REGEX = regex(r'\b(?i:todo|fixme)([^ \t\n])')

    def check_file(self, token: Token, content: str) -> None:
        """Report badly-cased or badly-separated TODO/FIXME tags in comments."""
        for match in finditer(r'//.*|/\*(.*?)\*/', content, DOTALL):
            # obtain comment content and its 1-based starting line
            text = match.group(0)
            first_line = content.count('\n', 0, match.start()) + 1

            for offset, line in enumerate(text.splitlines()):
                # checks for violation, line by line within the comment
                row = first_line + offset
                found_keyword = self._KEYWORD_REGEX.search(line)
                if found_keyword is not None:
                    self.report_error(
                        token,
                        Messages.get(self._MSG_KEYWORD, found_keyword.group(0)),
                        row,
                    )
                found_separator = self._SEPARATOR_REGEX.search(line)
                if found_separator is None:
                    continue
                self.report_error(
                    token,
                    Messages.get(self._MSG_SEPARATOR, found_separator.group(1)),
                    row,
                )
def check_file(self, token: Token, content: str) -> None:
    """Report TODO/FIXME comment tags that are miscased or lack a separator."""
    for comment in finditer(r'//.*|/\*(.*?)\*/', content, DOTALL):
        # obtain comment content; base is the comment's 1-based first line
        body = comment.group(0)
        base = content.count('\n', 0, comment.start()) + 1

        for idx, text in enumerate(body.splitlines()):
            # checks for violation on each physical line of the comment
            at_line = base + idx
            bad_case = self._KEYWORD_REGEX.search(text)
            if bad_case:
                self.report_error(
                    token,
                    Messages.get(self._MSG_KEYWORD, bad_case.group(0)),
                    at_line,
                )
            bad_sep = self._SEPARATOR_REGEX.search(text)
            if not bad_sep:
                continue
            self.report_error(
                token,
                Messages.get(self._MSG_SEPARATOR, bad_sep.group(1)),
                at_line,
            )
class UnnecessaryReturnChecker(RulebookChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#unnecessary-return

    Reports a bare `return;` that is the last statement of a `void` function.
    """
    ID: str = 'unnecessary-return'
    _MSG: str = 'unnecessary.return'

    def get_scopeset(self) -> set[str]:
        """Only function bodies are inspected."""
        return {'Function'}

    def visit_scope(self, scope: Scope) -> None:
        """Flag a trailing bare `return;` inside a void function body."""
        # checks for violation
        function: Function | None = scope.function
        if function is None:
            return
        # robustness fix: tokenDef or its predecessor may be absent on
        # malformed input; the original dereferenced them unguarded
        token_def: Token | None = function.tokenDef
        if token_def is None or \
                token_def.previous is None or \
                token_def.previous.str != 'void':
            return
        last_token: Token | None = scope.bodyEnd.previous if scope.bodyEnd else None
        if last_token is None or \
                last_token.str != ';' or \
                last_token.previous is None or \
                last_token.previous.str != 'return':
            return
        return_token: Token = last_token.previous
        # the `return` must start its statement: preceded by `{` or `;`
        prev_token: Token | None = return_token.previous
        if prev_token is None or \
                prev_token.str not in {'{', ';'}:
            return
        self.report_error(return_token, Messages.get(self._MSG))
def visit_scope(self, scope: Scope) -> None:
    """Report a trailing bare `return;` in a void function body."""
    # checks for violation
    function: Function | None = scope.function
    if function is None:
        return
    # robustness fix: guard the tokenDef/previous chains, which may be None
    # on malformed input and previously raised AttributeError
    name_token: Token | None = function.tokenDef
    if name_token is None or \
            name_token.previous is None or \
            name_token.previous.str != 'void':
        return
    last_token: Token | None = scope.bodyEnd.previous if scope.bodyEnd else None
    if last_token is None or \
            last_token.str != ';' or \
            last_token.previous is None or \
            last_token.previous.str != 'return':
        return
    return_token: Token = last_token.previous
    # the `return` must begin its own statement: preceded by `{` or `;`
    prev_token: Token | None = return_token.previous
    if prev_token is None or \
            prev_token.str not in {'{', ';'}:
        return
    self.report_error(return_token, Messages.get(self._MSG))
class UnnecessaryTrailingWhitespaceChecker(RulebookFileChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#unnecessary-trailing-whitespace

    Reports every line that ends with trailing spaces or tabs.
    """
    ID: str = 'unnecessary-trailing-whitespace'
    _MSG: str = 'unnecessary.trailing.whitespace'

    def check_file(self, token: Token, content: str) -> None:
        """Flag trailing whitespace; the reported column is its first character."""
        # checks for violation
        for row, text in enumerate(content.splitlines(), start=1):
            stripped = text.rstrip(' \t')
            if stripped == text:
                continue
            self.report_error(
                token,
                Messages.get(self._MSG),
                row,
                len(stripped) + 1,
            )
def check_file(self, token: Token, content: str) -> None:
    """Report lines ending in spaces/tabs, pointing at the first trailing char."""
    # checks for violation
    for line_no, raw in enumerate(content.splitlines(), 1):
        trimmed = raw.rstrip(' \t')
        if trimmed != raw:
            self.report_error(token, Messages.get(self._MSG), line_no, len(trimmed) + 1)
class UppercaseLChecker(RulebookTokenChecker):
    """See detail: https://hanggrian.github.io/rulebook/rules/#uppercase-l

    Reports numeric literals using a lowercase 'l' suffix, which is easily
    misread as the digit '1'.
    """
    ID: str = 'uppercase-l'
    _MSG: str = 'uppercase.l'

    def process_tokens(self, tokens: list[Token]) -> None:
        """Flag every number token containing a lowercase 'l'."""
        # checks for violation
        for token in [t for t in tokens if t.isNumber]:
            if 'l' not in token.str:
                # bug fix: was `return`, which aborted the whole scan at the
                # first conforming literal; only this literal should be skipped
                continue
            self.report_error(token, Messages.get(self._MSG))