[analyze] be more consistent in logging

Summary:
Minor improvements to logging around the analysis phase:
- mostly, always log backend stats and time spent analysing, regardless of `-j 1`
- output stats more like they appear in the record
- split `InferAnalyze.main` to be more readable (hopefully)

Reviewed By: mityal

Differential Revision: D16440586

fbshipit-source-id: 6f91f53cd
Branch: master
Author: Jules Villard (committed by Facebook GitHub Bot)
Parent: 4d1b300e5b
Commit: 8fbdc4e252

In `BackendStats.pp`, the printed field names gain the `summary_` prefix so they match the record fields ("output stats more like they appear in the record"):

```diff
@@ -95,8 +95,8 @@ let pp f stats =
   in
   F.fprintf f
     "@[Backend stats:@\n\
-     @[<v2>  file_try_load= %d@;read_from_disk= %d@;cache_hits= %d (%t)@;cache_misses= \
-     %d@;has_model_queries= %d@;@]@]@."
+     @[<v2>  summary_file_try_load= %d@;summary_read_from_disk= %d@;summary_cache_hits= %d \
+     (%t)@;summary_cache_misses= %d@;summary_has_model_queries= %d@;@]@]@."
     summary_file_try_load summary_read_from_disk summary_cache_hits
     (pp_hit_percent summary_cache_hits summary_cache_misses)
     summary_cache_misses summary_has_model_queries
```
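For readers less used to OCaml's `Format` strings: `@[<v2>` opens a vertical box indented by two columns, `@;` is a break hint (a newline inside a vertical box), and `%t` splices in a printing closure, here the cache-hit percentage. A minimal sketch of the same pattern; `pp_hit_percent` below is only modeled on the one used above, and the numbers are invented:

```ocaml
(* Sketch only: pp_hit_percent is modeled on the printer used above,
   not copied from Infer; the stats values are made up. *)
let pp_hit_percent hits misses f =
  let total = hits + misses in
  if total = 0 then Format.pp_print_string f "N/A"
  else Format.fprintf f "%d%%" (hits * 100 / total)

let () =
  let hits = 90 and misses = 10 in
  Format.printf
    "@[Backend stats:@\n@[<v2>  summary_cache_hits= %d (%t)@;summary_cache_misses= %d@]@]@."
    hits (pp_hit_percent hits misses) misses
(* prints:
   Backend stats:
     summary_cache_hits= 90 (90%)
     summary_cache_misses= 10
*)
```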

In `InferAnalyze.ml`, alias `Format` for the new printer code:

```diff
@@ -9,6 +9,7 @@
 
 (** Main module for the analysis after the capture phase *)
 open! IStd
+module F = Format
 module L = Logging
 
 let clear_caches () =
```
Still in `InferAnalyze.ml`, the source-file discovery moves out of `main` into its own function (the invalidation logic stays in `main`, see the last hunk):

```diff
@@ -87,13 +88,7 @@ let register_active_checkers () =
   RegisterCheckers.get_active_checkers () |> RegisterCheckers.register
 
 
-let main ~changed_files =
-  register_active_checkers () ;
-  if Config.reanalyze then (
-    L.progress "Invalidating procedures to be reanalyzed@." ;
-    Summary.OnDisk.reset_all ~filter:(Lazy.force Filtering.procedures_filter) () ;
-    L.progress "Done@." )
-  else DB.Results_dir.clean_specs_dir () ;
+let get_source_files_to_analyze ~changed_files =
   let n_all_source_files = ref 0 in
   let n_source_files_to_analyze = ref 0 in
   let filter sourcefile =
```
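The extracted function keeps the existing trick of counting inside the filter: two `ref` cells are bumped as the closure inspects each file, so both totals are ready once `SourceFiles.get_all` returns. A standalone sketch of that pattern, using a hypothetical `make_counting_filter` helper and plain stdlib OCaml (the real code simply inlines the refs):

```ocaml
(* Hypothetical helper illustrating the counting-filter pattern;
   the code above inlines the refs rather than using such a helper. *)
let make_counting_filter pred =
  let n_all = ref 0 and n_kept = ref 0 in
  let filter x =
    incr n_all ;
    let keep = pred x in
    if keep then incr n_kept ;
    keep
  in
  (filter, n_all, n_kept)

let () =
  let filter, n_all, n_kept = make_counting_filter (fun n -> n mod 2 = 0) in
  let kept = List.filter filter [1; 2; 3; 4; 5; 6] in
  Printf.printf "kept %d of %d (%d in result)\n" !n_kept !n_all (List.length kept)
```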
The "Found N source files" progress message becomes a proper `Format` printer instead of string concatenation, and the analysis itself moves into a new `analyze` function that returns the backend stats in both the sequential and the parallel case:

```diff
@@ -107,18 +102,25 @@ let main ~changed_files =
   in
   ScubaLogging.log_count ~label:"source_files_to_analyze" ~value:!n_source_files_to_analyze ;
   let source_files_to_analyze = SourceFiles.get_all ~filter () in
-  L.progress "Found %d%s source file%s to analyze in %s@." !n_source_files_to_analyze
-    ( if Config.reactive_mode || Option.is_some changed_files then
-      " (out of " ^ string_of_int !n_all_source_files ^ ")"
-    else "" )
-    (if Int.equal !n_source_files_to_analyze 1 then "" else "s")
-    Config.results_dir ;
-  (* empty all caches to minimize the process heap to have less work to do when forking *)
-  clear_caches () ;
+  let pp_n_source_files ~n_total fmt n_to_analyze =
+    let pp_total_if_not_all fmt n_total =
+      if Config.reactive_mode || Option.is_some changed_files then
+        F.fprintf fmt " (out of %d)" n_total
+    in
+    Format.fprintf fmt "Found %d%a source file%s to analyze in %s" n_to_analyze pp_total_if_not_all
+      n_total
+      (if Int.equal n_to_analyze 1 then "" else "s")
+      Config.results_dir
+  in
+  L.progress "%a@." (pp_n_source_files ~n_total:!n_all_source_files) !n_source_files_to_analyze ;
+  source_files_to_analyze
+
+
+let analyze source_files_to_analyze =
   if Int.equal Config.jobs 1 then (
     let target_files = List.rev_map source_files_to_analyze ~f:(fun sf -> TaskScheduler.File sf) in
     Tasks.run_sequentially ~f:analyze_target target_files ;
-    L.progress "@\nAnalysis finished in %as@." Pp.elapsed_time () )
+    BackendStats.get () )
   else (
     L.environment_info "Parallel jobs: %d@." Config.jobs ;
     let tasks = TaskScheduler.schedule source_files_to_analyze in
```
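The `%a` conversion consumes a printer together with the value it should print, which is what lets `pp_total_if_not_all` emit nothing at all when every file is analyzed: the " (out of %d)" suffix simply vanishes instead of being an empty concatenated string. A reduced sketch of the pattern, with hypothetical names:

```ocaml
(* Sketch with hypothetical names, mirroring pp_n_source_files above. *)
let pp_total_if_partial ~partial fmt n_total =
  if partial then Format.fprintf fmt " (out of %d)" n_total

let pp_n_files ~n_total ~partial fmt n =
  Format.fprintf fmt "Found %d%a source file%s to analyze" n
    (pp_total_if_partial ~partial)
    n_total
    (if n = 1 then "" else "s")

let () =
  (* prints: Found 3 (out of 120) source files to analyze *)
  Format.printf "%a@." (pp_n_files ~n_total:120 ~partial:true) 3
```

Note also that the sequential branch now returns `BackendStats.get ()` instead of logging on the spot; that is what lets `main` below report stats and timing identically whether or not `-j 1` was passed.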
Finally, the stats collation drops its per-worker "gotstats" debug line, and the new, shorter `main` drives the whole pipeline, logging elapsed time and collected stats regardless of the job count:

```diff
@@ -127,15 +129,29 @@ let main ~changed_files =
       Tasks.Runner.create ~jobs:Config.jobs ~f:analyze_target ~child_epilogue:BackendStats.get
         ~tasks
     in
-    let all_stats = Tasks.Runner.run runner in
-    let stats =
-      Array.fold all_stats ~init:BackendStats.initial ~f:(fun collated_stats stats_opt ->
+    let workers_stats = Tasks.Runner.run runner in
+    let collected_stats =
+      Array.fold workers_stats ~init:BackendStats.initial ~f:(fun collated_stats stats_opt ->
           match stats_opt with
           | None ->
               collated_stats
           | Some stats ->
-              L.debug Analysis Quiet "gotstats:@\n%a@." BackendStats.pp stats ;
               BackendStats.merge stats collated_stats )
     in
-    L.debug Analysis Quiet "collected stats:@\n%a@." BackendStats.pp stats ) ;
-  output_json_makefile_stats source_files_to_analyze
+    collected_stats )
+
+
+let main ~changed_files =
+  register_active_checkers () ;
+  if Config.reanalyze then (
+    L.progress "Invalidating procedures to be reanalyzed@." ;
+    Summary.OnDisk.reset_all ~filter:(Lazy.force Filtering.procedures_filter) () ;
+    L.progress "Done@." )
+  else DB.Results_dir.clean_specs_dir () ;
+  let source_files = get_source_files_to_analyze ~changed_files in
+  (* empty all caches to minimize the process heap to have less work to do when forking *)
+  clear_caches () ;
+  let stats = analyze source_files in
+  L.progress "@\nAnalysis finished in %as@." Pp.elapsed_time () ;
+  L.debug Analysis Quiet "collected stats:@\n%a@." BackendStats.pp stats ;
+  output_json_makefile_stats source_files
```
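Each worker hands its stats back through `child_epilogue:BackendStats.get`, and the fold merges the `Some` entries into a single record. A sketch of that collation step, where the record type and `merge` are stand-ins for `BackendStats`, and stdlib `Array.fold_left` replaces Core's labeled `Array.fold`:

```ocaml
(* Stand-in stats type; BackendStats has more fields and its own merge. *)
type stats = {cache_hits: int; cache_misses: int}

let initial = {cache_hits= 0; cache_misses= 0}

let merge s1 s2 =
  { cache_hits= s1.cache_hits + s2.cache_hits
  ; cache_misses= s1.cache_misses + s2.cache_misses }

(* A worker slot may hold None (no stats reported); skip those. *)
let collate workers_stats =
  Array.fold_left
    (fun collated -> function None -> collated | Some stats -> merge stats collated)
    initial workers_stats

let () =
  let collected =
    collate [|Some {cache_hits= 5; cache_misses= 1}; None; Some {cache_hits= 2; cache_misses= 3}|]
  in
  Printf.printf "hits=%d misses=%d\n" collected.cache_hits collected.cache_misses
```

For counter-like fields this treats stats as a monoid: `initial` is the identity and `merge` combines, so the order in which worker results are folded does not matter.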
