From 89040bee1c53f2fe67edb70717561fb370202502 Mon Sep 17 00:00:00 2001 From: Jules Villard Date: Tue, 26 Jun 2018 02:16:00 -0700 Subject: [PATCH] [backend] do not let caches grow too big Summary: This changes the behaviour back to before the "fork once then pipe new tasks for each source file" era: before that we would fork once per source file, which had the effect of emptying the caches for every source file. Without this the caches can grow unchecked. This is probably suboptimal but should at least be the same behaviour as before. Reviewed By: jeremydubreil Differential Revision: D8615124 fbshipit-source-id: 69fc101 --- infer/src/backend/InferAnalyze.ml | 12 +++++++++--- infer/src/backend/Tasks.ml | 4 ++++ infer/src/backend/ondemand.ml | 2 ++ infer/src/backend/ondemand.mli | 3 +++ 4 files changed, 18 insertions(+), 3 deletions(-) diff --git a/infer/src/backend/InferAnalyze.ml b/infer/src/backend/InferAnalyze.ml index a0a5d3cd3..2fd3f392e 100644 --- a/infer/src/backend/InferAnalyze.ml +++ b/infer/src/backend/InferAnalyze.ml @@ -10,11 +10,19 @@ open! IStd module L = Logging +let clear_caches () = + Ondemand.clear_cache () ; + Summary.clear_cache () ; + Typ.Procname.SQLite.clear_cache () + + (** Create tasks to analyze an execution environment *) let analyze_source_file : SourceFile.t Tasks.doer = fun source_file -> let exe_env = Exe_env.mk () in L.(debug Analysis Medium) "@\nProcessing '%a'@." 
SourceFile.pp source_file ; + (* clear cache for each source file to avoid it growing unboundedly *) + clear_caches () ; Callbacks.analyze_file exe_env source_file ; if Config.write_html then Printer.write_all_html_files source_file @@ -79,9 +87,7 @@ let main ~changed_files = (if Int.equal n_source_files 1 then "" else "s") Config.results_dir ; (* empty all caches to minimize the process heap to have less work to do when forking *) - Summary.clear_cache () ; - Typ.Procname.SQLite.clear_cache () ; - Random.self_init () ; + clear_caches () ; if Int.equal Config.jobs 1 then ( Tasks.run_sequentially ~f:analyze_source_file source_files_to_analyze ; L.progress "@\nAnalysis finished in %as@." Pp.elapsed_time () ) diff --git a/infer/src/backend/Tasks.ml b/infer/src/backend/Tasks.ml index 4323bd7df..8a2ac36e6 100644 --- a/infer/src/backend/Tasks.ml +++ b/infer/src/backend/Tasks.ml @@ -27,9 +27,13 @@ let run_sequentially ~(f: 'a doer) (tasks: 'a list) : unit = let fork_protect ~f x = + (* this is needed whenever a new process is started *) EventLogger.prepare () ; L.reset_formatters () ; ResultsDatabase.new_database_connection () ; + (* get different streams of random numbers in each fork, in particular to lessen contention in + `Filename.mk_temp` *) + Random.self_init () ; f x diff --git a/infer/src/backend/ondemand.ml b/infer/src/backend/ondemand.ml index 2707b90c4..35f104bb1 100644 --- a/infer/src/backend/ondemand.ml +++ b/infer/src/backend/ondemand.ml @@ -236,6 +236,8 @@ let analyze_proc_name ?caller_pdesc callee_pname = summary_option +let clear_cache () = Typ.Procname.Hash.clear (Lazy.force cached_results) + (** Find a proc desc for the procedure, perhaps loading it from disk. 
*) let get_proc_desc callee_pname = match !callbacks_ref with Some callbacks -> callbacks.get_proc_desc callee_pname | None -> None diff --git a/infer/src/backend/ondemand.mli b/infer/src/backend/ondemand.mli index 90db3cf7d..34e046d65 100644 --- a/infer/src/backend/ondemand.mli +++ b/infer/src/backend/ondemand.mli @@ -31,3 +31,6 @@ val set_callbacks : callbacks -> unit val unset_callbacks : unit -> unit (** Unset the callbacks used to perform on-demand analysis. *) + +val clear_cache : unit -> unit +(** empty the cache of ondemand results *)