Path 1: 33 of 39 calls (85%)

Observed argument values, with call counts in parentheses:

linter: PyLinter (33)
jobs: 2 (25), 1 (8)
files: list (28), PyLinter._iterate_file_descrs def (3), list_iterator (2)
arguments: None (29), ['--enable', 'R9999'] (1), ['test_directory'] (1), ['/Users/andrehora/Documents/git/projects-pathspotter/pylint/tests/regrtest_data/regression...

def check_parallel(
    linter: PyLinter,
    jobs: int,
    files: Iterable[FileItem],
    arguments: None | str | Sequence[str] = None,
) -> None:
    """Use the given linter to lint the files with given amount of workers (jobs).

    This splits the work filestream-by-filestream. If you need to do work across
    multiple files, as in the similarity-checker, then implement the map/reduce mixin functionality.
    """
    # The linter is inherited by all the pool's workers, i.e. the linter
    # is identical to the linter object here. This is required so that
    # a custom PyLinter object can be used.
    initializer = functools.partial(_worker_initialize, arguments=arguments)
    with ProcessPoolExecutor(
        max_workers=jobs, initializer=initializer, initargs=(dill.dumps(linter),)
    ) as executor:
        linter.open()
        all_stats = []
        all_mapreduce_data: defaultdict[
            int, list[defaultdict[str, list[Any]]]
        ] = defaultdict(list)

        # Maps each file to be worked on by a single _worker_check_single_file() call,
        # collecting any map/reduce data by checker module so that we can 'reduce' it
        # later.
        for (
            worker_idx,  # used to merge map/reduce data across workers
            module,
            file_path,
            base_name,
            messages,
            stats,
            msg_status,
            mapreduce_data,
        ) in executor.map(_worker_check_single_file, files):
            linter.file_state.base_name = base_name
            linter.file_state._is_base_filestate = False
            linter.set_current_module(module, file_path)
            for msg in messages:
                linter.reporter.handle_message(msg)
            all_stats.append(stats)
            all_mapreduce_data[worker_idx].append(mapreduce_data)
            linter.msg_status |= msg_status

    _merge_mapreduce_data(linter, all_mapreduce_data)
    linter.stats = merge_stats([linter.stats] + all_stats)
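For orientation, here is a minimal sketch of how this dominant path can be reached: check_parallel() called directly with a small list of FileItem descriptors, jobs=2 and arguments=None, the most common values recorded above. The bare PyLinter() construction, the example.py module, and the FileItem field names (name, filepath, modpath) are assumptions for illustration, not details taken from the report.

from pathlib import Path

from pylint.lint import PyLinter
from pylint.lint.parallel import check_parallel
from pylint.typing import FileItem

if __name__ == "__main__":
    # A tiny module to lint, so each worker has a real file to open.
    Path("example.py").write_text("x = 1\n")

    linter = PyLinter()
    files = [FileItem(name="example", filepath="example.py", modpath="example.py")]

    # Two worker processes; no extra command-line arguments forwarded to the workers.
    check_parallel(linter, jobs=2, files=files, arguments=None)
    print(linter.stats)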
            

Path 2: 4 of 39 calls (10%)

Observed argument values, with call counts in parentheses:

linter: PyLinter (3), _CustomPyLinter (1)
jobs: 2 (4)
files: PyLinter._iterate_file_descrs def (4)
arguments: ['/Users/andrehora/Documents/git/projects-pathspotter/pylint/tests/functional/a/arguments.py'] (1), ['/Users/andrehora/Documents/git/projects-pathspott...

(Source listing identical to the check_parallel listing shown under Path 1.)
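Path 2 covers the same flow, but one of its calls uses a _CustomPyLinter subclass, the situation the listing's comment about "a custom PyLinter object" refers to: the pool initializer receives dill.dumps(linter), so whatever PyLinter subclass the caller built comes back as that same subclass inside each worker. A hedged sketch of that round trip (CustomPyLinter here is an illustrative stand-in, not the _CustomPyLinter from the report, and whether a given custom linter serializes cleanly depends on what state it holds):

import dill

from pylint.lint import PyLinter


class CustomPyLinter(PyLinter):
    """Stand-in for a project-specific PyLinter subclass."""


if __name__ == "__main__":
    # The worker initializer effectively does this with the serialized linter.
    restored = dill.loads(dill.dumps(CustomPyLinter()))
    assert isinstance(restored, CustomPyLinter)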
            

Path 3: 1 of 39 calls (3%)

Observed argument values, with call counts in parentheses:

linter: PyLinter (1)
jobs: 1 (1)
files: list_iterator (1)
arguments: 1 (1)
exception raised: BrokenProcessPool (1)

(Source listing identical to the check_parallel listing shown under Path 1.)
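Path 3 is the failure case. The recorded arguments value of 1 suggests the worker initializer rejected it (the parameter is expected to be None, a string, or a sequence of strings), and an initializer that raises leaves the pool broken, so the caller sees BrokenProcessPool instead of results from executor.map(). A standalone sketch of that concurrent.futures behaviour with a deliberately failing initializer (not pylint code):

import sys
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures.process import BrokenProcessPool


def bad_initializer() -> None:
    # Stands in for _worker_initialize failing on an unusable `arguments` value.
    raise TypeError("arguments must be None, a string, or a sequence of strings")


def noop(item: int) -> int:
    return item


if __name__ == "__main__":
    try:
        with ProcessPoolExecutor(max_workers=1, initializer=bad_initializer) as executor:
            list(executor.map(noop, [1, 2, 3]))
    except BrokenProcessPool as exc:
        print(f"worker pool is broken: {exc}", file=sys.stderr)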
            

Path 4: 1 of 39 calls (3%)

Observed argument values, with call counts in parentheses:

linter: PyLinter (1)
jobs: 2 (1)
files: PyLinter._iterate_file_descrs def (1)
arguments: ['not_here', 'not_here_too'] (1)

(Source listing identical to the check_parallel listing shown under Path 1.)
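Path 4 records a single call where the requested modules do not exist on disk ('not_here' and 'not_here_too'). A hedged sketch of one way such a call can arise, driving pylint through its normal entry point with two parallel jobs; exit=False simply keeps the script alive so the linter can be inspected afterwards:

from pylint.lint import Run

if __name__ == "__main__":
    # Two parallel jobs over module names that do not exist.
    run = Run(["-j", "2", "not_here", "not_here_too"], exit=False)
    print(run.linter.msg_status)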