[backend] Add support for command-line option --per-procedure-parallelism

Summary:
Add a new command-line option `--per-procedure-parallelism`, to change the granularity of parallelism of the analysis from file to procedure.
This is intended for `--reactive` mode where e.g. a single file is changed and the analysis currently uses just one core.
When the option is used, the Makefile mechanism is replaced by using forking instead.
The parent process does as little allocation as possible, to avoid taxing the kernel.

Caveats:
- Not active in Java (issues with camlzip).
- Not active in checkers, yet.

Example use:
```
infer --reactive --changed-files-index index.txt --per-procedure-parallelism -- analyze
```

Reviewed By: jberdine

Differential Revision: D4634884

fbshipit-source-id: e358c18
master
Cristiano Calcagno 8 years ago committed by Facebook Github Bot
parent f4c424b4cc
commit ec6a3cf6bb

@ -158,16 +158,18 @@ let attr_tbl = Typ.Procname.Hash.create 16;
let defined_attr_tbl = Typ.Procname.Hash.create 16;
let load_attributes proc_name =>
let load_attributes cache::cache proc_name =>
try (Typ.Procname.Hash.find attr_tbl proc_name) {
| Not_found =>
let proc_attributes = load_attr defined_only::false proc_name;
switch proc_attributes {
| Some attrs =>
if cache {
Typ.Procname.Hash.add attr_tbl proc_name proc_attributes;
if attrs.is_defined {
Typ.Procname.Hash.add defined_attr_tbl proc_name proc_attributes
}
}
| None => ()
};
proc_attributes
@ -257,8 +259,8 @@ let stats () => {
/* Find the file where the procedure was captured, if a cfg for that file exists.
Return also a boolean indicating whether the procedure is defined in an
include file. */
let find_file_capturing_procedure pname =>
switch (load_attributes pname) {
let find_file_capturing_procedure cache::cache=true pname =>
switch (load_attributes cache::cache pname) {
| None => None
| Some proc_attributes =>
let source_file = proc_attributes.ProcAttributes.source_file_captured;

@ -15,8 +15,9 @@ open! IStd;
let store_attributes: ProcAttributes.t => unit;
/** Load the attributes for the procedure from the attributes database. */
let load_attributes: Typ.Procname.t => option ProcAttributes.t;
/** Load the attributes for the procedure from the attributes database.
If cache is true, add the attribute to the global cache */
let load_attributes: cache::bool => Typ.Procname.t => option ProcAttributes.t;
/** Load attributes for the procedure but only if is_defined is true */
@ -30,9 +31,10 @@ let get_correct_type_from_objc_class_name: Typename.t => option Typ.t;
/* Find the file where the procedure was captured, if a cfg for that file exists.
Return also a boolean indicating whether the procedure is defined in an
include file. */
include file.
If cache is true, add the attribute to the global cache */
let find_file_capturing_procedure:
Typ.Procname.t => option (SourceFile.t, [ | `Include | `Source]);
cache::bool? => Typ.Procname.t => option (SourceFile.t, [ | `Include | `Source]);
type t;

@ -15,47 +15,60 @@ let module L = Logging;
let module F = Format;
let analyze_exe_env exe_env => {
let init_time = Unix.gettimeofday ();
/** Create tasks to analyze an execution environment */
let analyze_exe_env_tasks cluster exe_env :Tasks.t => {
L.log_progress_file ();
Specs.clear_spec_tbl ();
Random.self_init ();
if Config.checkers {
/* run the checkers only */
Tasks.create [
fun () => {
let call_graph = Exe_env.get_cg exe_env;
Callbacks.iterate_callbacks call_graph exe_env;
Printer.write_all_html_files exe_env
if Config.write_html {
Printer.write_all_html_files cluster
}
}
]
} else {
/* run the full analysis */
Interproc.do_analysis exe_env;
Printer.write_all_html_files exe_env;
Interproc.print_stats exe_env;
let elapsed = Unix.gettimeofday () -. init_time;
L.out "Interprocedural footprint analysis terminated in %f sec@." elapsed
Tasks.create
(Interproc.do_analysis_closures exe_env)
continuation::(
if (Config.write_html || Config.developer_mode) {
Some (
fun () => {
if Config.write_html {
Printer.write_all_html_files cluster
};
if Config.developer_mode {
Interproc.print_stats cluster
}
}
)
} else {
None
}
)
}
};
/** Create an exe_env from a cluster. */
let exe_env_from_cluster cluster => {
let _exe_env = Exe_env.create ();
Exe_env.add_cg _exe_env cluster;
Exe_env.freeze _exe_env
};
/** Analyze a cluster of files */
let analyze_cluster cluster_num (cluster: Cluster.t) => {
let exe_env = exe_env_from_cluster cluster;
/** Create tasks to analyze a cluster */
let analyze_cluster_tasks cluster_num (cluster: Cluster.t) :Tasks.t => {
let exe_env = Exe_env.from_cluster cluster;
let defined_procs = Cg.get_defined_nodes (Exe_env.get_cg exe_env);
let num_procs = List.length defined_procs;
L.err "@.Processing cluster #%d with %d procedures@." (cluster_num + 1) num_procs;
analyze_exe_env exe_env
analyze_exe_env_tasks cluster exe_env
};
let analyze_cluster cluster_num cluster => Tasks.run (analyze_cluster_tasks cluster_num cluster);
let output_json_makefile_stats clusters => {
let clusters_to_analyze = List.filter f::ClusterMakefile.cluster_should_be_analyzed clusters;
let num_files = List.length clusters_to_analyze;
let num_files = List.length clusters;
let num_procs = 0;
/* can't compute it at this stage */
let num_lines = 0;
@ -93,6 +106,26 @@ let print_stdout_legend () => {
L.stdout "@\n@?"
};
let cluster_should_be_analyzed cluster => {
let fname = DB.source_dir_to_string cluster;
let in_ondemand_config =
Option.map f::(fun dirs => String.Set.mem dirs fname) Ondemand.dirs_to_analyze;
let check_modified () => {
let modified = DB.file_was_updated_after_start (DB.filename_from_string fname);
if (modified && Config.developer_mode) {
L.stdout "Modified: %s@." fname
};
modified
};
switch in_ondemand_config {
| Some b =>
/* ondemand config file is specified */
b
| None when Config.reactive_mode => check_modified ()
| None => true
}
};
let main makefile => {
BuiltinDefn.init ();
RegisterCheckers.register ();
@ -110,14 +143,48 @@ let main makefile => {
if Config.merge {
MergeCapture.merge_captured_targets ()
};
let clusters = DB.find_source_dirs ();
L.stdout "Found %d source files in %s@." (List.length clusters) Config.results_dir;
let all_clusters = DB.find_source_dirs ();
let clusters_to_analyze = List.filter f::cluster_should_be_analyzed all_clusters;
L.stdout
"Found %d (out of %d) source files to be analyzed in %s@."
(List.length clusters_to_analyze)
(List.length all_clusters)
Config.results_dir;
if (makefile != "" || Config.per_procedure_parallelism) {
let is_java () =>
List.exists
f::(
fun cl => SourceFile.string_crc_has_extension ext::"java" (DB.source_dir_to_string cl)
)
all_clusters;
if (not Config.per_procedure_parallelism) {
ClusterMakefile.create_cluster_makefile clusters_to_analyze makefile
} else {
/* per-procedure parallelism */
if (is_java ()) {
/* Java uses ZipLib which is incompatible with forking */
L.stderr "Error: option --per-procedure-parallelism not supported with Java@.";
exit 1
};
L.stdout "per-procedure parallelism jobs:%d@." Config.jobs;
if (makefile != "") {
ClusterMakefile.create_cluster_makefile clusters makefile
ClusterMakefile.create_cluster_makefile [] makefile
};
/* Prepare tasks one cluster at a time while executing in parallel */
let runner = Tasks.Runner.create jobs::Config.jobs;
let cluster_start_tasks i cluster => {
let tasks = analyze_cluster_tasks i cluster;
let aggregate_tasks = Tasks.aggregate size::1 tasks;
Tasks.Runner.start runner tasks::aggregate_tasks
};
List.iteri f::cluster_start_tasks clusters_to_analyze;
Tasks.Runner.complete runner
}
} else {
List.iteri f::(fun i cluster => analyze_cluster i cluster) clusters;
/* This branch is reached when -j 1 is used */
List.iteri f::analyze_cluster clusters_to_analyze;
L.stdout "@\nAnalysis finished in %as@." Pp.elapsed_time ()
};
output_json_makefile_stats clusters
output_json_makefile_stats clusters_to_analyze
}
};

@ -147,7 +147,10 @@ let register_report_at_exit file =
try
Unix.mkdir_p (Filename.dirname file);
let stats_oc = open_out file in
Yojson.Basic.pretty_to_channel stats_oc json_stats ;
let stats_fd = Unix.descr_of_out_channel stats_oc in
ignore (Unix.flock stats_fd Unix.Flock_command.lock_exclusive);
Yojson.Basic.pretty_to_channel stats_oc json_stats;
ignore (Unix.flock stats_fd Unix.Flock_command.unlock);
Out_channel.close stats_oc
with exc ->
Format.eprintf "Info: failed to write stats to %s@\n%s@\n%s@\n%s@."

@ -0,0 +1,69 @@
(*
* Copyright (c) 2017 - present Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*)
open! IStd
module L = Logging
module F = Format
(** A task is a suspended computation to be run later, possibly in a child process. *)
type closure = unit -> unit
(** A group of tasks: [closures] may be executed in parallel, while
    [continuations] are executed afterwards, in order.
    NOTE(review): [Queue] here is the one brought into scope by [IStd]
    (Core-style API) — [Queue.singleton]/[enqueue_all]/[to_list] below are not
    stdlib [Queue] operations. *)
type t = {
closures: closure list;
continuations: closure Queue.t;
}
type tasks = t
(** [create ?continuation closures] packages [closures] with an optional single
    continuation; the continuation queue is empty when [continuation] is [None]. *)
let create ?(continuation = None) closures =
let continuations = match continuation with
| None ->
Queue.create ()
| Some closure ->
Queue.singleton closure in
{ closures; continuations }
(** Tasks with nothing to run. *)
let empty = { closures = []; continuations = Queue.create () }
(* Aggregate closures into groups of the given size *)
let aggregate ~size t =
(* fuse a group of closures into one closure running them in sequence *)
let group_to_closure group =
fun () -> List.iter ~f:(fun closure -> closure ()) group in
if size > 1
then
(* [groupi ~break] starts a new group each time the index is a multiple of
   [size], so each group holds at most [size] closures *)
let groups = List.groupi ~break:(fun n _ _ -> Int.equal (n mod size) 0) t.closures in
let closures = List.map ~f:group_to_closure groups in
{ t with closures }
else
t
(** Run all the closures sequentially, then all the continuations. *)
let run t =
List.iter ~f:(fun f -> f ()) t.closures;
Queue.iter ~f:(fun closure -> closure ()) t.continuations
(** A runner hands the closures of successive task sets to a
    [Utils.ProcessPool] for parallel execution, and saves their continuations
    to be run in the parent process once all children have completed. *)
module Runner = struct
type runner =
{ pool : Utils.ProcessPool.t;
all_continuations : closure Queue.t }
let create ~jobs =
{ pool = Utils.ProcessPool.create ~jobs;
all_continuations = Queue.create () }
(* Continuations are queued for [complete]; each closure is started as a pool
   child (the pool blocks when all [jobs] slots are taken). *)
let start runner ~tasks =
let pool = runner.pool in
Queue.enqueue_all runner.all_continuations (Queue.to_list tasks.continuations);
List.iter
~f:(fun x -> Utils.ProcessPool.start_child ~f:(fun f -> f ()) ~pool x)
tasks.closures
(* Wait for all outstanding children, then run the saved continuations. *)
let complete runner =
Utils.ProcessPool.wait_all runner.pool;
Queue.iter ~f:(fun f -> f ()) runner.all_continuations
end

@ -0,0 +1,46 @@
(*
* Copyright (c) 2017 - present Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*)
open! IStd
(** A sequence of tasks that can be executed in parallel,
with a continuation to be executed at the end *)
type t
type tasks = t
(** Each task/continuation executes a closure *)
type closure = unit -> unit
(** Aggregate closures into groups of the given size *)
val aggregate : size:int -> t -> t
(** Create tasks with a list of closures to be executed in parallel,
and an optional continuation to be executed afterwards *)
val create : ?continuation:(closure option) -> closure list -> t
(** No-op tasks *)
val empty : t
(** Run the closures sequentially, then the continuation *)
val run : t -> unit
module Runner : sig
(** A runner accepts new tasks repeatedly for parallel execution *)
type runner
(** Create a runner that executes up to [jobs] tasks in parallel *)
val create: jobs:int -> runner
(** Start the given tasks with the runner *)
val start : runner -> tasks:t -> unit
(** Complete all the outstanding tasks *)
val complete : runner -> unit
end

@ -15,27 +15,6 @@ module CLOpt = CommandLineOption
(** Module to create a makefile with dependencies between clusters *)
let cluster_should_be_analyzed cluster =
let fname = DB.source_dir_to_string cluster in
let in_ondemand_config =
Option.map ~f:(fun dirs -> String.Set.mem dirs fname) Ondemand.dirs_to_analyze in
let check_modified () =
let modified =
DB.file_was_updated_after_start (DB.filename_from_string fname) in
if modified &&
Config.developer_mode
then L.stdout "Modified: %s@." fname;
modified in
begin
match in_ondemand_config with
| Some b -> (* ondemand config file is specified *)
b
| None when Config.reactive_mode ->
check_modified ()
| None ->
true
end
let pp_prolog fmt clusters =
let escape = Escape.escape_map (fun c -> if Char.equal c '#' then Some "\\#" else None) in
@ -52,9 +31,8 @@ let pp_prolog fmt clusters =
F.fprintf fmt "CLUSTERS=";
List.iteri
~f:(fun i cl ->
if cluster_should_be_analyzed cl
then F.fprintf fmt "%a " Cluster.pp_cluster_name (i+1))
~f:(fun i _ ->
F.fprintf fmt "%a " Cluster.pp_cluster_name (i+1))
clusters;
F.fprintf fmt "@.@.default: test@.@.all: test@.@.";

@ -72,9 +72,6 @@ type t =
(** initial state, used to add cg's *)
type initial = t
(** freeze the execution environment, so it can be queried *)
let freeze exe_env = exe_env (* TODO: unclear what this function is used for *)
(** create a new execution environment *)
let create () =
{ cg = Cg.create None;
@ -97,7 +94,7 @@ let add_cg (exe_env: t) (source_dir : DB.source_dir) =
List.iter
~f:(fun pname ->
(match AttributesTable.find_file_capturing_procedure pname with
(match AttributesTable.find_file_capturing_procedure ~cache:false pname with
| None ->
()
| Some (source_captured, origin) ->
@ -124,7 +121,7 @@ let get_file_data exe_env pname =
with Not_found ->
begin
let source_file_opt =
match AttributesTable.load_attributes pname with
match AttributesTable.load_attributes ~cache:true pname with
| None ->
L.err "can't find tenv_cfg_object for %a@." Typ.Procname.pp pname;
None
@ -188,6 +185,12 @@ let get_proc_desc exe_env pname =
| None ->
None
(** Create an exe_env from a source dir *)
let from_cluster cluster =
let exe_env = create () in
add_cg exe_env cluster;
exe_env
(** [iter_files f exe_env] applies [f] to the filename and tenv and cfg for each file in [exe_env] *)
let iter_files f exe_env =
let do_file _ file_data seen_files_acc =

@ -18,16 +18,13 @@ type initial
(** execution environment: a global call graph, and map from procedure names to cfg and tenv *)
type t
(** freeze the execution environment, so it can be queried *)
val freeze : initial -> t
(** create a new execution environment *)
val create : unit -> initial
(** add call graph from the source dir in the spec db,
with relative tenv and cfg, to the execution environment *)
val add_cg : initial -> DB.source_dir -> unit
(** Create an exe_env from a cluster *)
val from_cluster : Cluster.t -> t
(** get the global call graph *)
val get_cg : t -> Cg.t

@ -962,13 +962,6 @@ let execute_filter_prop wl tenv pdesc init_node (precondition : Prop.normal Spec
do_after_node source init_node;
None
(** get all the nodes in the current call graph with their defined children *)
let get_procs_and_defined_children call_graph =
List.map
~f:(fun (n, ns) ->
(n, Typ.Procname.Set.elements ns))
(Cg.get_nodes_and_defined_children call_graph)
let pp_intra_stats wl proc_desc fmt _ =
let nstates = ref 0 in
let nodes = Procdesc.get_nodes proc_desc in
@ -1387,9 +1380,11 @@ let perform_transition exe_env tenv proc_name source =
| _ -> ()
let interprocedural_algorithm exe_env : unit =
(* Create closures for the interprocedural algorithm *)
let interprocedural_algorithm_closures ~prepare_proc exe_env : Tasks.closure list =
let call_graph = Exe_env.get_cg exe_env in
let process_one_proc proc_name =
prepare_proc proc_name;
let analyze proc_desc =
ignore (Ondemand.analyze_proc_desc ~propagate_exceptions:false proc_desc proc_desc) in
match Exe_env.get_proc_desc exe_env proc_name with
@ -1399,13 +1394,14 @@ let interprocedural_algorithm exe_env : unit =
analyze proc_desc
| Some proc_desc -> analyze proc_desc
| None -> () in
List.iter ~f:process_one_proc (Cg.get_defined_nodes call_graph)
let procs_to_analyze = Cg.get_defined_nodes call_graph in
let create_closure proc_name =
fun () -> process_one_proc proc_name in
List.map ~f:create_closure procs_to_analyze
(** Perform the analysis of an exe_env *)
let do_analysis exe_env =
let cg = Exe_env.get_cg exe_env in
let procs_and_defined_children = get_procs_and_defined_children cg in
(** Create closures to perform the analysis of an exe_env *)
let do_analysis_closures exe_env : Tasks.closure list =
let get_calls caller_pdesc =
let calls = ref [] in
let f (callee_pname, loc) = calls := (callee_pname, loc) :: !calls in
@ -1430,15 +1426,6 @@ let do_analysis exe_env =
else None in
Specs.init_summary (nodes, proc_flags, calls, attributes, proc_desc_option) in
List.iter
~f:(fun (pn, _) ->
let should_init () =
Config.models_mode ||
is_none (Specs.get_summary pn) in
if should_init ()
then init_proc pn)
procs_and_defined_children;
let callbacks =
let get_proc_desc proc_name =
match Exe_env.get_proc_desc exe_env proc_name with
@ -1474,9 +1461,20 @@ let do_analysis exe_env =
get_proc_desc;
} in
let prepare_proc pn =
let should_init =
Config.models_mode ||
is_none (Specs.get_summary pn) in
if should_init then init_proc pn in
let closures =
List.map
~f:(fun closure () ->
Ondemand.set_callbacks callbacks;
interprocedural_algorithm exe_env;
Ondemand.unset_callbacks ()
closure ();
Ondemand.unset_callbacks ())
(interprocedural_algorithm_closures ~prepare_proc exe_env) in
closures
let visited_and_total_nodes ~filter cfg =
@ -1583,9 +1581,9 @@ let print_stats_cfg proc_shadowed source cfg =
L.out "%a" print_file_stats ();
save_file_stats ()
(** Print the stats for all the files in the exe_env *)
let print_stats exe_env =
if Config.developer_mode then
(** Print the stats for all the files in the cluster *)
let print_stats cluster =
let exe_env = Exe_env.from_cluster cluster in
Exe_env.iter_files
(fun source cfg ->
let proc_shadowed proc_desc =

@ -12,8 +12,8 @@ open! IStd
(** Interprocedural Analysis *)
(** Perform the analysis of an exe_env *)
val do_analysis : Exe_env.t -> unit
(** Create closures to perform the analysis of an exe_env *)
val do_analysis_closures : Exe_env.t -> Tasks.closure list
(** Print the stats for all the files in the exe_env *)
val print_stats : Exe_env.t -> unit
(** Print the stats for all the files in the cluster *)
val print_stats : Cluster.t -> unit

@ -63,7 +63,7 @@ let should_be_analyzed proc_name proc_attributes =
not (already_analyzed ()) (* avoid re-analysis of the same procedure *)
let procedure_should_be_analyzed proc_name =
match AttributesTable.load_attributes proc_name with
match AttributesTable.load_attributes ~cache:true proc_name with
| Some proc_attributes when Config.reactive_capture && not proc_attributes.is_defined ->
(* try to capture procedure first *)
let defined_proc_attributes = OndemandCapture.try_capture proc_attributes in

@ -482,7 +482,7 @@ let write_html_proc source proof_cover table_nodes_at_linenum global_err_log pro
let process_proc =
Procdesc.is_defined proc_desc &&
SourceFile.equal proc_loc.Location.file source &&
match AttributesTable.find_file_capturing_procedure proc_name with
match AttributesTable.find_file_capturing_procedure ~cache:true proc_name with
| None -> true
| Some (source_captured, _) ->
SourceFile.equal source_captured (Procdesc.get_loc proc_desc).file in
@ -588,9 +588,12 @@ let write_html_file linereader filename procs =
Errlog.pp_html filename [fname_encoding] fmt global_err_log;
Io_infer.Html.close (fd, fmt))
(** Create filename.ext.html for each file in the exe_env. *)
let write_all_html_files exe_env =
if Config.write_html then
(** Create filename.ext.html for each file in the cluster. *)
let write_all_html_files cluster =
let exe_env = Exe_env.from_cluster cluster in
let load_proc_desc pname = ignore (Exe_env.get_proc_desc exe_env pname) in
let () = List.iter ~f:load_proc_desc (Cg.get_defined_nodes (Exe_env.get_cg exe_env)) in
let linereader = LineReader.create () in
Exe_env.iter_files
(fun _ cfg ->

@ -49,5 +49,5 @@ val node_start_session : Procdesc.Node.t -> int -> SourceFile.t -> unit
The boolean indicates whether to print whole seconds only. *)
val write_proc_html : SourceFile.t -> bool -> Procdesc.t -> unit
(** Create filename.ext.html for each file in the exe_env. *)
val write_all_html_files : Exe_env.t -> unit
(** Create filename.ext.html for each file in the cluster. *)
val write_all_html_files : Cluster.t -> unit

@ -610,7 +610,7 @@ let proc_is_library proc_attributes =
*)
let proc_resolve_attributes proc_name =
let from_attributes_table () =
AttributesTable.load_attributes proc_name in
AttributesTable.load_attributes ~cache:true proc_name in
let from_specs () = match get_summary proc_name with
| Some summary ->
Some summary.attributes

@ -57,7 +57,7 @@ let check_block_retain_cycle tenv caller_pname prop block_nullified =
let mblock = Pvar.get_name block_nullified in
let block_pname = Typ.Procname.mangled_objc_block (Mangled.to_string mblock) in
let block_captured =
match AttributesTable.load_attributes block_pname with
match AttributesTable.load_attributes ~cache:true block_pname with
| Some attributes ->
fst (List.unzip attributes.ProcAttributes.captured)
| None ->
@ -476,7 +476,7 @@ let method_exists right_proc_name methods =
else (* ObjC/C++ case : The attribute map will only exist when we have code for the method or
the method has been called directly somewhere. It can still be that this is not the
case but we have a model for the method. *)
match AttributesTable.load_attributes right_proc_name with
match AttributesTable.load_attributes ~cache:true right_proc_name with
| Some attrs -> attrs.ProcAttributes.is_defined
| None -> Specs.summary_exists_in_models right_proc_name
@ -964,7 +964,7 @@ let execute_load ?(report_deref_errors=true) pname pdesc tenv id rhs_exp typ loc
[Prop.conjoin_eq tenv (Exp.Var id) undef prop_]
let load_ret_annots pname =
match AttributesTable.load_attributes pname with
match AttributesTable.load_attributes ~cache:true pname with
| Some attrs ->
let ret_annots, _ = attrs.ProcAttributes.method_annotation in
ret_annots
@ -1155,8 +1155,10 @@ let rec sym_exec tenv current_pdesc _instr (prop_: Prop.normal Prop.t) path
let attrs_opt =
let attr_opt = Option.map ~f:Procdesc.get_attributes callee_pdesc_opt in
match attr_opt, resolved_pname with
| Some attrs, Typ.Procname.ObjC_Cpp _ -> Some attrs
| None, Typ.Procname.ObjC_Cpp _ -> AttributesTable.load_attributes resolved_pname
| Some attrs, Typ.Procname.ObjC_Cpp _ ->
Some attrs
| None, Typ.Procname.ObjC_Cpp _ ->
AttributesTable.load_attributes ~cache:true resolved_pname
| _ -> None in
let objc_property_accessor_ret_typ_opt =
match attrs_opt with
@ -1357,7 +1359,7 @@ and add_constraints_on_actuals_by_ref tenv prop actuals_by_ref callee_pname call
else havoc_actual_by_ref in
let non_const_actuals_by_ref =
let is_not_const (e, _, i) =
match AttributesTable.load_attributes callee_pname with
match AttributesTable.load_attributes ~cache:true callee_pname with
| Some attrs ->
let is_const = List.mem ~equal:Int.equal attrs.ProcAttributes.const_formals i in
if is_const then (

@ -1108,6 +1108,11 @@ and patterns_skip_translation =
CLOpt.mk_json ~deprecated:["skip_translation"] ~long
"Matcher or list of matchers for names of files that should not be analyzed at all.")
and per_procedure_parallelism =
CLOpt.mk_bool ~deprecated:["per-procedure-parallelism"] ~long:"per-procedure-parallelism"
"Perform analysis with per-procedure parallelism.\n\
Java is not supported."
and pmd_xml =
CLOpt.mk_bool ~long:"pmd-xml"
~parse_mode:CLOpt.(Infer [Driver])
@ -1652,6 +1657,7 @@ and out_file_cmdline = !out_file
and patterns_never_returning_null = match patterns_never_returning_null with (k,r) -> (k,!r)
and patterns_skip_translation = match patterns_skip_translation with (k,r) -> (k,!r)
and patterns_modeled_expensive = match patterns_modeled_expensive with (k,r) -> (k,!r)
and per_procedure_parallelism = !per_procedure_parallelism
and pmd_xml = !pmd_xml
and precondition_stats = !precondition_stats
and print_logs = !print_logs

@ -126,6 +126,7 @@ val os_type : os_type
val patterns_modeled_expensive : string * Yojson.Basic.json
val patterns_never_returning_null : string * Yojson.Basic.json
val patterns_skip_translation : string * Yojson.Basic.json
val per_procedure_parallelism : bool
val perf_stats_prefix : string
val proc_stats_filename : string
val property_attributes : string

@ -90,9 +90,17 @@ let append_crc_cutoff ?(key="") name =
Utils.string_crc_hex32 name_for_crc in
name_up_to_cutoff ^ Char.to_string crc_token ^ crc_str
(* Length of .crc part: 32 characters of digest, plus 1 character of crc_token *)
let dot_crc_len = 1 + 32
let strip_crc str =
(* Strip 32 characters of digest, plus 1 character of crc_token *)
String.sub ~pos:0 ~len:(String.length str - 33) str
Core.Std.String.slice str 0 (- dot_crc_len)
let string_crc_has_extension ~ext name_crc =
let name = strip_crc name_crc in
match Filename.split_extension name with
| (_, Some ext') -> String.equal ext ext'
| (_, None) -> false
(** string encoding of a source file (including path) as a single filename *)
let encoding source_file =

@ -9,21 +9,35 @@
type t [@@deriving compare]
(** equality of source files *)
val equal : t -> t -> bool
(** Maps from source_file *)
module Map : Map.S with type key = t
(** Set of source files *)
module Set : Set.S with type elt = t
(** compute line count of a source file *)
val line_count : t -> int
module UNSAFE : sig
(** Create a SourceFile from any path. This is unchecked and should not be
used when the existence of source files is a requirement. Furthermore,
absolute paths won't be made relative to project root.*)
val from_string : string -> t
end
(** Append a crc to the string, using string_crc_hex32.
Cut the string if it exceeds the cutoff limit.
Use an optional key to compute the crc. *)
val append_crc_cutoff : ?key:string -> string -> string
(** Set of files read from --changed-files-index file, None if option not specified
NOTE: it may include extra source_files if --changed-files-index contains paths to
header files *)
val changed_files_set : Set.t option
(** empty source file *)
val empty : t
(** equality of source files *)
val equal : t -> t -> bool
(** create source file from absolute path *)
val from_abs_path : string -> t
@ -34,48 +48,37 @@ val create : string -> t
(** string encoding of a source file (including path) as a single filename *)
val encoding : t -> string
(** convert a source file to a string
WARNING: result may not be valid file path, do not use this function to perform operations
on filenames *)
val to_string : t -> string
(** pretty print t *)
val pp : Format.formatter -> t -> unit
(** get the full path of a source file *)
val to_abs_path : t -> string
(** get the relative path of a source file *)
val to_rel_path : t -> string
val is_infer_model : t -> bool
(** Returns true if the file is a C++ model *)
val is_cpp_model : t -> bool
val is_infer_model : t -> bool
(** Returns true if the file is in project root *)
val is_under_project_root : t -> bool
(** compute line count of a source file *)
val line_count : t -> int
(** Return approximate source file corresponding to the parameter if it's header file and
file exists. returns None otherwise *)
val of_header : t -> t option
(** Set of files read from --changed-files-index file, None if option not specified
NOTE: it may include extra source_files if --changed-files-index contains paths to
header files *)
val changed_files_set : Set.t option
(** pretty print t *)
val pp : Format.formatter -> t -> unit
(** Append a crc to the string, using string_crc_hex32.
Cut the string if it exceeds the cutoff limit.
Use an optional key to compute the crc. *)
val append_crc_cutoff : ?key:string -> string -> string
(** Remove the crc from the string, and check if it has the given extension *)
val string_crc_has_extension : ext:string -> string -> bool
(** Strip any crc attached to any string generated by string_append_crc_cutoff *)
val strip_crc : string -> string
module UNSAFE : sig
(** Create a SourceFile from any path. This is unchecked and should not be
used when the existence of source files is a requirement. Furthermore,
absolute paths won't be made relative to project root.*)
val from_string : string -> t
end
(** get the full path of a source file *)
val to_abs_path : t -> string
(** get the relative path of a source file *)
val to_rel_path : t -> string
(** convert a source file to a string
WARNING: result may not be valid file path, do not use this function to perform operations
on filenames *)
val to_string : t -> string

@ -291,8 +291,62 @@ let activate_run_epilogues_on_signal = lazy (
Signal.Expert.handle Signal.int run_epilogues_on_signal
)
(* Keep track of whether the current execution is in a child process *)
let in_child = ref false
(** Pool of forked child processes, bounded by a number of [jobs].
    The parent deliberately tracks only a count of live children, keeping
    allocation in the parent minimal. *)
module ProcessPool = struct
type t =
{
(* number of children currently believed to be running *)
mutable num_processes : int;
(* maximum number of children allowed to run concurrently *)
jobs : int;
}
let create ~jobs =
{
num_processes = 0;
jobs;
}
let incr counter =
counter.num_processes <- counter.num_processes + 1
let decr counter =
counter.num_processes <- counter.num_processes - 1
(* Block until some child terminates, then account for it.
   NOTE(review): [Unix.wait `Any] reaps *any* child of this process, not
   necessarily one started by this pool — confirm no other children can
   exist while a pool is in use. *)
let wait counter =
let _ = Unix.wait `Any in
decr counter
(* Reap every outstanding child. The [for] bound is evaluated once up front,
   so the [decr] performed inside [wait] does not cut the loop short. *)
let wait_all counter =
for _ = 1 to counter.num_processes do
wait counter
done
let should_wait counter =
counter.num_processes >= counter.jobs
(* Fork a child that runs [f x] and exits; the parent records the child and,
   if the pool is full, blocks until a slot frees up. *)
let start_child ~f ~pool x =
match Unix.fork () with
| `In_the_child ->
in_child := true;
f x;
exit 0
| `In_the_parent _pid ->
incr pool;
if should_wait pool
then wait pool
end
(** Like [List.iteri], but each call [f i x] runs in a forked child, with up
    to [jobs] children in parallel. *)
let iteri_parallel ~f ?(jobs=1) l =
let pool = ProcessPool.create ~jobs in
List.iteri ~f:(fun i x -> ProcessPool.start_child ~f:(f i) ~pool x) l;
ProcessPool.wait_all pool
(** Like [List.iter], but parallel; see [iteri_parallel]. *)
let iter_parallel ~f ?(jobs=1) l =
iteri_parallel ~f:(fun _ x -> f x) ~jobs l
let register_epilogue f desc =
let f_no_exn () =
if not !in_child then
try f ()
with exn ->
F.eprintf "Error while running epilogue %s:@ %a.@ Powering through...@." desc Exn.pp exn in

@ -81,6 +81,27 @@ val suppress_stderr2 : ('a -> 'b -> 'c) -> 'a -> 'b -> 'c
The versions are strings of the shape "n.m.t", the order is lexicographic. *)
val compare_versions : string -> string -> int
(** Like List.iter but operates in parallel up to a number of jobs *)
val iter_parallel : f:('a -> unit) -> ?jobs:int -> 'a list -> unit
(** Like List.iteri but operates in parallel up to a number of jobs *)
val iteri_parallel : f:(int -> 'a -> unit) -> ?jobs:int -> 'a list -> unit
(** Pool of processes to execute in parallel up to a number of jobs. *)
module ProcessPool : sig
type t
(** Create a new pool of processes *)
val create : jobs:int -> t
(** Start a new child process in the pool.
If all the jobs are taken, wait until one is free. *)
val start_child : f:('a -> unit) -> pool:t -> 'a -> unit
(** Wait until all the currently executing processes terminate *)
val wait_all : t -> unit
end
(** Register a function to run when the program exits or is interrupted. Registered functions are
run in the reverse order in which they were registered. *)
val register_epilogue : (unit -> unit) -> string -> unit

Loading…
Cancel
Save