Keep only on-demand infrastructure and delete the rest of the infrastructure.

Summary: public
Remove back-end infrastructure that exists only when on-demand mode is disabled.
This, together with removing a few command-line options, sheds a lot of weight from the back-end.
No changes expected for on-demand mode.

Reviewed By: sblackshear

Differential Revision: D2960242

fb-gh-sync-id: 220d821
shipit-source-id: 220d821
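
The heart of the change shows up in the Cg diff below: node_info loses its disabled field, _add_node g n defined disabled becomes add_node g n ~defined with a labelled argument, and restrict_defined, node_set_defined and get_originally_defined_nodes go away. The following standalone sketch only models the shape of that simplified API, using plain strings instead of Procname.t and a bare Hashtbl instead of the real node map; it is an illustration, not the actual Cg module.

(* Standalone sketch of the simplified call-graph node bookkeeping.
   Strings stand in for Procname.t; the real module is richer. *)
type node_info = {
  mutable defined : bool;        (* defined procedure, as opposed to just declared *)
  mutable children : string list (* callees *)
}

let node_map : (string, node_info) Hashtbl.t = Hashtbl.create 3

(* Mirrors the new labelled-argument API: the flag only goes from false to
   true, so add_edge cannot accidentally reset a defined node to declared. *)
let add_node n ~defined =
  match Hashtbl.find_opt node_map n with
  | Some info -> if defined then info.defined <- true
  | None -> Hashtbl.add node_map n { defined; children = [] }

let add_defined_node n = add_node n ~defined:true

let add_edge nfrom nto =
  add_node nfrom ~defined:false;
  add_node nto ~defined:false;
  let info_from = Hashtbl.find node_map nfrom in
  info_from.children <- nto :: info_from.children

(* With the disabled flag gone, "defined nodes" is a single boolean filter;
   there is no separate notion of originally-defined nodes any more. *)
let get_defined_nodes () =
  Hashtbl.fold (fun n info acc -> if info.defined then n :: acc else acc) node_map []

let () =
  add_defined_node "main";
  add_edge "main" "helper";
  List.iter print_endline (get_defined_nodes ())  (* prints: main *)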

@ -140,8 +140,7 @@ let iterate_callbacks store_summary call_graph exe_env =
(* analyze all the currently defined procedures *)
Cg.get_defined_nodes call_graph in
let originally_defined_procs =
(* all the defined procedures, even if we are analyzing a restricted subset *)
Cg.get_originally_defined_nodes call_graph in
Cg.get_defined_nodes call_graph in
let saved_language = !Config.curr_language in
let cluster_id proc_name =
@ -163,7 +162,6 @@ let iterate_callbacks store_summary call_graph exe_env =
let attributes_opt =
Specs.proc_resolve_attributes proc_name in
let should_reset =
not !Config.ondemand_enabled ||
Specs.get_summary proc_name = None in
if should_reset
then Specs.reset_summary call_graph proc_name attributes_opt in

@ -21,14 +21,25 @@ type in_out_calls =
}
type node_info =
{ mutable defined : bool; (** defined procedure as opposed to just declared *)
mutable disabled : bool; (** originally defined procedures disabled by restrict_defined *)
{
(** defined procedure as opposed to just declared *)
mutable defined : bool;
mutable parents: Procname.Set.t;
mutable children: Procname.Set.t;
mutable ancestors : Procname.Set.t option ; (** ancestors are computed lazily *)
mutable heirs : Procname.Set.t option ; (** heirs are computed lazily *)
mutable recursive_dependents : Procname.Set.t option ; (** recursive dependents are computed lazily *)
mutable in_out_calls : in_out_calls option; (** calls are computed lazily *)
(** ancestors are computed lazily *)
mutable ancestors : Procname.Set.t option;
(** heirs are computed lazily *)
mutable heirs : Procname.Set.t option;
(** recursive dependents are computed lazily *)
mutable recursive_dependents : Procname.Set.t option;
(** calls are computed lazily *)
mutable in_out_calls : in_out_calls option;
}
(** Type for call graph *)
@ -44,17 +55,15 @@ let create () =
nLOC = !Config.nLOC;
node_map = Procname.Hash.create 3 }
let _add_node g n defined disabled =
let add_node g n ~defined =
try
let info = Procname.Hash.find g.node_map n in
(* defined and disabled only go from false to true
to avoid accidental overwrite to false by calling add_edge *)
if defined then info.defined <- true;
if disabled then info.disabled <- true;
with Not_found ->
let info =
{ defined = defined;
disabled = disabled;
parents = Procname.Set.empty;
children = Procname.Set.empty;
ancestors = None;
@ -64,10 +73,8 @@ let _add_node g n defined disabled =
Procname.Hash.add g.node_map n info
let add_defined_node g n =
_add_node g n true false
add_node g n ~defined:true
let add_disabled_node g n =
_add_node g n false true
(** Compute the ancestors of the node, if not already computed *)
let compute_ancestors g node =
@ -143,15 +150,9 @@ let node_defined (g: t) n =
info.defined
with Not_found -> false
let node_set_defined (g: t) n defined =
try
let info = Procname.Hash.find g.node_map n in
info.defined <- defined
with Not_found -> ()
let add_edge g nfrom nto =
_add_node g nfrom false false;
_add_node g nto false false;
add_node g nfrom ~defined:false;
add_node g nto ~defined:false;
let info_from = Procname.Hash.find g.node_map nfrom in
let info_to = Procname.Hash.find g.node_map nto in
info_from.children <- Procname.Set.add nto info_from.children;
@ -164,20 +165,6 @@ let node_map_iter f g =
let cmp ((n1: Procname.t), _) ((n2: Procname.t), _) = Procname.compare n1 n2 in
IList.iter (fun (n, info) -> f n info) (IList.sort cmp !table)
(** If not None, restrict defined nodes to the given set,
and mark them as disabled. *)
let restrict_defined (g: t) (nodeset_opt: Procname.Set.t option) =
match nodeset_opt with
| None -> ()
| Some nodeset ->
let f node info =
if info.defined && not (Procname.Set.mem node nodeset) then
begin
info.defined <- false;
info.disabled <- true
end in
node_map_iter f g
let get_nodes (g: t) =
let nodes = ref Procname.Set.empty in
let f node _ =
@ -212,7 +199,11 @@ let node_get_num_ancestors g n =
let get_edges (g: t) : ((node * int) * (node * int)) list =
let edges = ref [] in
let f node info =
Procname.Set.iter (fun nto -> edges := (node_get_num_ancestors g node, node_get_num_ancestors g nto)::!edges) info.children in
Procname.Set.iter
(fun nto ->
edges :=
(node_get_num_ancestors g node, node_get_num_ancestors g nto) :: !edges)
info.children in
node_map_iter f g;
!edges
@ -228,7 +219,7 @@ let get_defined_children (g: t) n =
let get_parents (g: t) n =
(Procname.Hash.find g.node_map n).parents
(** [call_recursively source dest] returns [true] if function [source] recursively calls function [dest] *)
(** Check if [source] recursively calls [dest] *)
let calls_recursively (g: t) source dest =
Procname.Set.mem source (get_ancestors g dest)
@ -237,7 +228,6 @@ let get_nonrecursive_dependents (g: t) n =
let is_not_recursive pn = not (Procname.Set.mem pn (get_ancestors g n)) in
let res0 = Procname.Set.filter is_not_recursive (get_all_children g n) in
let res = Procname.Set.filter (node_defined g) res0 in
(* L.err "Nonrecursive dependents of %s: %a@\n" n pp_nodeset res; *)
res
(** Return the ancestors of [n] which are also heirs of [n] *)
@ -245,7 +235,6 @@ let compute_recursive_dependents (g: t) n =
let reached_from_n pn = Procname.Set.mem n (get_ancestors g pn) in
let res0 = Procname.Set.filter reached_from_n (get_ancestors g n) in
let res = Procname.Set.filter (node_defined g) res0 in
(* L.err "Recursive dependents of %s: %a@\n" n pp_nodeset res; *)
res
(** Compute the ancestors of [n] which are also heirs of [n], if not pre-computed already *)
@ -270,7 +259,7 @@ let get_nodes_and_defined_children (g: t) =
IList.map (fun n -> (n, get_defined_children g n)) nodes_list
type nodes_and_edges =
(node * bool * bool) list * (* nodes with defined and disabled flag *)
(node * bool) list * (* nodes with defined flag *)
(node * node) list (* edges *)
(** Return the list of nodes, with defined+disabled flags, and the list of edges *)
@ -280,7 +269,7 @@ let get_nodes_and_edges (g: t) : nodes_and_edges =
let do_children node nto =
edges := (node, nto) :: !edges in
let f node info =
nodes := (node, info.defined, info.disabled) :: !nodes;
nodes := (node, info.defined) :: !nodes;
Procname.Set.iter (do_children node) info.children in
node_map_iter f g;
(!nodes, !edges)
@ -288,20 +277,9 @@ let get_nodes_and_edges (g: t) : nodes_and_edges =
(** Return the list of nodes which are defined *)
let get_defined_nodes (g: t) =
let (nodes, _) = get_nodes_and_edges g in
let get_node (node, _, _) = node in
let get_node (node, _) = node in
IList.map get_node
(IList.filter (fun (_, defined, _) -> defined)
nodes)
(** Return the list of nodes which were originally defined,
i.e. the nodes that were defined before calling restrict_defined. *)
let get_originally_defined_nodes (g: t) =
let (nodes, _) = get_nodes_and_edges g in
let get_node (node, _, _) = node in
IList.map get_node
(IList.filter
(fun (_, defined, disabled) -> defined || disabled)
(IList.filter (fun (_, defined) -> defined)
nodes)
(** Return the path of the source file *)
@ -312,15 +290,17 @@ let get_source (g: t) =
let get_nLOC (g: t) =
g.nLOC
(** [extend cg1 cg2] extends [cg1] in place with nodes and edges from [cg2]; undefined nodes become defined if at least one side is. *)
(** [extend cg1 cg2] extends [cg1] in place with nodes and edges from [cg2];
undefined nodes become defined if at least one side is. *)
let extend cg_old cg_new =
let nodes, edges = get_nodes_and_edges cg_new in
IList.iter (fun (node, defined, disabled) -> _add_node cg_old node defined disabled) nodes;
IList.iter (fun (node, defined) -> add_node cg_old node ~defined) nodes;
IList.iter (fun (nfrom, nto) -> add_edge cg_old nfrom nto) edges
(** Begin support for serialization *)
let callgraph_serializer : (DB.source_file * int * nodes_and_edges) Serialization.serializer = Serialization.create_serializer Serialization.cg_key
let callgraph_serializer : (DB.source_file * int * nodes_and_edges) Serialization.serializer =
Serialization.create_serializer Serialization.cg_key
(** Load a call graph from a file *)
let load_from_file (filename : DB.filename) : t option =
@ -329,9 +309,8 @@ let load_from_file (filename : DB.filename) : t option =
| None -> None
| Some (source, nLOC, (nodes, edges)) ->
IList.iter
(fun (node, defined, disabled) ->
if defined then add_defined_node g node;
if disabled then add_disabled_node g node)
(fun (node, defined) ->
if defined then add_defined_node g node)
nodes;
IList.iter (fun (nfrom, nto) -> add_edge g nfrom nto) edges;
g.source <- source;
@ -340,7 +319,10 @@ let load_from_file (filename : DB.filename) : t option =
(** Save a call graph into a file *)
let store_to_file (filename : DB.filename) (call_graph : t) =
Serialization.to_file callgraph_serializer filename (call_graph.source, call_graph.nLOC, (get_nodes_and_edges call_graph))
Serialization.to_file
callgraph_serializer
filename
(call_graph.source, call_graph.nLOC, (get_nodes_and_edges call_graph))
let pp_graph_dotty get_specs (g: t) fmt =
let nodes_with_calls = get_all_nodes g in
@ -352,13 +334,22 @@ let pp_graph_dotty get_specs (g: t) fmt =
let pp_node fmt (n, _) =
F.fprintf fmt "\"%s\"" (Procname.to_filename n) in
let pp_node_label fmt (n, calls) =
F.fprintf fmt "\"%a | calls=%d %d | specs=%d)\"" Procname.pp n calls.in_calls calls.out_calls (num_specs n) in
F.fprintf fmt "\"%a | calls=%d %d | specs=%d)\""
Procname.pp n calls.in_calls calls.out_calls (num_specs n) in
F.fprintf fmt "digraph {@\n";
IList.iter (fun nc -> F.fprintf fmt "%a [shape=box,label=%a,color=%s,shape=%s]@\n" pp_node nc pp_node_label nc (get_color nc) (get_shape nc)) nodes_with_calls;
IList.iter (fun (s, d) -> F.fprintf fmt "%a -> %a@\n" pp_node s pp_node d) (get_edges g);
IList.iter
(fun nc ->
F.fprintf fmt "%a [shape=box,label=%a,color=%s,shape=%s]@\n"
pp_node nc pp_node_label nc (get_color nc) (get_shape nc))
nodes_with_calls;
IList.iter
(fun (s, d) ->
F.fprintf fmt "%a -> %a@\n" pp_node s pp_node d)
(get_edges g);
F.fprintf fmt "}@."
(** Print the current call graph as a dotty file. If the filename is [None], use the current file dir inside the DB dir. *)
(** Print the current call graph as a dotty file.
If the filename is [None], use the current file dir inside the DB dir. *)
let save_call_graph_dotty fname_opt get_specs (g: t) =
let fname_dot = match fname_opt with
| None -> DB.Results_dir.path_to_filename DB.Results_dir.Abs_source_dir ["call_graph.dot"]
@ -367,17 +358,3 @@ let save_call_graph_dotty fname_opt get_specs (g: t) =
let fmt = F.formatter_of_out_channel outc in
pp_graph_dotty get_specs g fmt;
close_out outc
(*
let pp_nodeset fmt set =
let f node = F.fprintf fmt "%a@ " Procname.pp node in
Procname.Set.iter f set
let map_option f l =
let lo = IList.filter (function | Some _ -> true | None -> false) (IList.map f l) in
IList.map (function Some p -> p | None -> assert false) lo
(** Return true if [n] is recursive *)
let is_recursive (g: t) n =
Procname.Set.mem n (get_ancestors g n)
*)

@ -31,8 +31,7 @@ val add_edge : t -> Procname.t -> Procname.t -> unit
(** Add a node to the call graph as defined *)
val add_defined_node : t -> Procname.t -> unit
(** [calls_recursively g source dest] returns [true] if function [source]
recursively calls function [dest] *)
(** Check if [source] recursively calls [dest] *)
val calls_recursively: t -> Procname.t -> Procname.t -> bool
(** Create an empty call graph *)
@ -57,10 +56,6 @@ val get_calls : t -> Procname.t -> in_out_calls
(** Return the list of nodes which are defined *)
val get_defined_nodes : t -> Procname.t list
(** Return the list of nodes which were originally defined,
i.e. the nodes that were defined before calling restrict_defined. *)
val get_originally_defined_nodes : t -> Procname.t list
(** Return the children of [n] which are defined *)
val get_defined_children: t -> Procname.t -> Procname.Set.t
@ -76,8 +71,8 @@ val get_nodes_and_calls : t -> (Procname.t * in_out_calls) list
(** Return all the nodes with their defined children *)
val get_nodes_and_defined_children : t -> (Procname.t * Procname.Set.t) list
(** Return the list of nodes, with defined+disabled flag, and the list of edges *)
val get_nodes_and_edges : t -> (Procname.t * bool * bool) list * (Procname.t * Procname.t) list
(** Return the list of nodes, with defined flag, and the list of edges *)
val get_nodes_and_edges : t -> (Procname.t * bool) list * (Procname.t * Procname.t) list
(** Return the children of [n] which are not heirs of [n] and are defined *)
val get_nonrecursive_dependents : t -> Procname.t -> Procname.Set.t
@ -97,16 +92,9 @@ val load_from_file : DB.filename -> t option
(** Returns true if the node is defined *)
val node_defined : t -> Procname.t -> bool
(** If not None, restrict defined nodes to the given set,
and mark them as disabled. *)
val restrict_defined : t -> Procname.Set.t option -> unit
(** Print the current call graph as a dotty file. If the filename is [None],
use the current file dir inside the DB dir. *)
val save_call_graph_dotty : DB.filename option -> (Procname.t -> 'a list) -> t -> unit
(** Save a call graph into a file *)
val store_to_file : DB.filename -> t -> unit
(** Change the defined flag of a node *)
val node_set_defined : t -> Procname.t -> bool -> unit

@ -12,24 +12,8 @@ module F = Format
(** Module to process clusters of procedures. *)
(** if true, print tracing information for functions that manipulate clusters *)
let trace_clusters = ref false
(** cluster element: the file name, the number of procedures defined in it,
and the list of active procedures.
A procedure is active if it is defined only in this file,
or if it is defined in several files and this
is the representative file for it (see Exe_env.add_cg) *)
type elem =
{
ce_active_procs : Procname.t list; (** list of active procedures *)
ce_file : DB.source_file;
ce_naprocs : int; (** number of active procedures defined in the file *)
ce_ondemand : DB.source_dir option; (** if present, the other fields are unused *)
}
(** cluster of files *)
type t = elem list
(** a cluster is a file *)
type t = DB.source_dir
(** type stored in .cluster file: (n,m,cl) indicates cl is cluster n out of m *)
type serializer_t = int * int * t
@ -46,157 +30,19 @@ let load_from_file (filename : DB.filename) : serializer_t option =
let store_to_file (filename : DB.filename) (serializer_t: serializer_t) =
Serialization.to_file serializer filename serializer_t
let get_ondemand_info ce =
ce.ce_ondemand
let one_cluster_per_procedure = true
let create_ondemand source_dir =
let defined_procs_opt =
if Ondemand.one_cluster_per_procedure () then
let cg_fname = DB.source_dir_get_internal_file source_dir ".cg" in
match Cg.load_from_file cg_fname with
| None -> None
| Some cg ->
Some (Cg.get_defined_nodes cg)
else
None in
let ce =
{
ce_active_procs = [];
ce_file = DB.source_file_from_string "";
ce_naprocs = 0;
ce_ondemand = Some source_dir;
} in
let mk_cluster pname =
[{ce with ce_active_procs = [pname]}] in
let clusters = match defined_procs_opt with
| None ->
[[ce]]
| Some defined_procs ->
IList.map mk_cluster defined_procs in
clusters
let create_bottomup source_file naprocs active_procs =
{
ce_active_procs = active_procs;
ce_file = source_file;
ce_naprocs = naprocs;
ce_ondemand = None;
}
let cluster_nfiles cluster = IList.length cluster
let cluster_naprocs cluster =
IList.fold_left (fun n ce -> ce.ce_naprocs + n) 0 cluster
let clusters_nfiles clusters =
IList.fold_left (fun n cluster -> cluster_nfiles cluster + n) 0 clusters
let clusters_naprocs clusters =
IList.fold_left (fun n cluster -> cluster_naprocs cluster + n) 0 clusters
let print_clusters_stats clusters =
let pp_cluster num cluster =
L.err "cluster #%d files: %d active procedures: %d@."
num
(cluster_nfiles cluster)
(cluster_naprocs cluster) in
let i = ref 0 in
IList.iter
(fun cluster ->
incr i;
pp_cluster !i cluster)
clusters
let cluster_split_prefix (cluster : t) size =
let rec split (cluster_seen : t) (cluster_todo : t) n =
if n <= 0 then (IList.rev cluster_seen, cluster_todo)
else match cluster_todo with
| [] -> raise Not_found
| ce :: todo' -> split (ce :: cluster_seen) todo' (n - ce.ce_naprocs) in
split [] cluster size
let combine_split_clusters (clusters : t list) max_size desired_size =
if !trace_clusters then L.err "[combine_split_clusters]@.";
let old_clusters = ref clusters in
let old_size = clusters_naprocs !old_clusters in
let new_clusters = ref [] in
let current = ref [] in
let current_size = ref 0 in
while !old_clusters != [] do
if old_size !=
clusters_naprocs !old_clusters + clusters_naprocs !new_clusters + !current_size
then begin
L.err "mismatch in invariant for cluster size@.";
assert (cluster_naprocs !current = !current_size);
L.err "old size: %d@." old_size;
L.err "old clusters size: %d@." (clusters_naprocs !old_clusters);
L.err "new clusters size: %d@." (clusters_naprocs !new_clusters);
L.err "current size: %d@." !current_size;
assert false
end;
let next_cluster = IList.hd !old_clusters in
let next_size = cluster_naprocs next_cluster in
let new_size = !current_size + next_size in
if (new_size > max_size || new_size > desired_size) && !current_size > 0 then
begin
new_clusters := !new_clusters @ [!current];
current := [];
current_size := 0
end
else if new_size > max_size then
begin
let next_cluster', next_cluster'' = cluster_split_prefix next_cluster max_size in
current := [];
current_size := 0;
new_clusters := !new_clusters @ [next_cluster'];
old_clusters := next_cluster'' :: (IList.tl !old_clusters)
end
else
begin
current := !current @ next_cluster;
current_size := !current_size + next_size;
old_clusters := IList.tl !old_clusters
end
done;
if !current_size > 0 then new_clusters := !new_clusters @ [!current];
!new_clusters
(** return the set of active procedures in a cluster *)
let get_active_procs cluster =
match !Config.ondemand_enabled, cluster with
| true, [{ce_active_procs = []}] ->
None
| _ ->
let procset = ref Procname.Set.empty in
let do_cluster_elem cluster_elem =
let add proc =
if not (Procname.Set.mem proc !procset) then
procset := Procname.Set.add proc !procset in
IList.iter add cluster_elem.ce_active_procs in
IList.iter do_cluster_elem cluster;
Some !procset
let cl_name n = "cl" ^ string_of_int n
let cl_file n = "x" ^ (cl_name n) ^ ".cluster"
let pp_cl fmt n = Format.fprintf fmt "%s" (cl_name n)
let pp_cluster_name fmt n = Format.fprintf fmt "%s" (cl_name n)
let pp_cluster_dependency nr tot_nr cluster print_files fmt dependents =
let pp_cluster nr tot_nr cluster fmt () =
let fname = cl_file nr in
let pp_cl fmt n = Format.fprintf fmt "%s" (cl_name n) in
store_to_file (DB.filename_from_string fname) (nr, tot_nr, cluster);
let pp_active_procs fmt cluster =
let procnames = match get_active_procs cluster with
| None ->
[]
| Some procset ->
Procname.Set.elements procset in
let pp_pname fmt pname = Format.fprintf fmt "%s" (Procname.to_string pname) in
F.fprintf fmt "procedures: %a" (pp_seq pp_pname) procnames in
let pp_file fmt ce = F.fprintf fmt "%s" (DB.source_file_to_string ce.ce_file) in
let pp_files fmt cluster = F.fprintf fmt "files: %a" (pp_seq pp_file) cluster in
F.fprintf fmt "%a : %a@\n" pp_cl nr (pp_seq pp_cl) dependents;
F.fprintf fmt "%a: @\n" pp_cl nr;
F.fprintf fmt "\t$(INFERANALYZE) -cluster %s >%a@\n" fname pp_cl nr;
if print_files then F.fprintf fmt "# %a %a" pp_files cluster pp_active_procs cluster;
F.fprintf fmt "@\n"
let print_clusters clusters =
let pp_cluster num =
L.err "cluster #%d @." num in
IList.iteri (fun i _ -> pp_cluster i) clusters

@ -0,0 +1,31 @@
(*
* Copyright (c) 2016 - present Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*)
module L = Logging
module F = Format
(** Module to process clusters of procedures. *)
(** a cluster is a file *)
type t = DB.source_dir
(** type stored in .cluster file: (n,m,cl) indicates cl is cluster n out of m *)
type serializer_t = int * int * t
(** Load a cluster from a file *)
val load_from_file : DB.filename -> serializer_t option
(** Print a cluster *)
val pp_cluster : int -> int -> t -> F.formatter -> unit -> unit
(** Print a cluster name *)
val pp_cluster_name : F.formatter -> int -> unit
(** Print clusters *)
val print_clusters : t list -> unit
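
To make the new interface concrete: a cluster is now just a source directory, and the .cluster file stores a triple (n, m, cl) meaning cluster n out of m. Below is a minimal standalone sketch of that serialized shape, with string and Marshal standing in for DB.source_dir and the Serialization module, and a made-up captured-source path; it is not the actual Cluster implementation.

(* Standalone sketch: a cluster is a source directory; the .cluster file
   stores (n, m, cl), i.e. cluster n out of m. *)
type cluster = string
type serializer_t = int * int * cluster

let store_to_file (filename : string) (data : serializer_t) =
  let oc = open_out_bin filename in
  Marshal.to_channel oc data [];
  close_out oc

let load_from_file (filename : string) : serializer_t option =
  try
    let ic = open_in_bin filename in
    let data : serializer_t = Marshal.from_channel ic in
    close_in ic;
    Some data
  with Sys_error _ -> None

let () =
  (* "xcl1.cluster" follows the cl_file naming in the diff; the directory is made up *)
  store_to_file "xcl1.cluster" (1, 3, "infer-out/captured/example.c/");
  match load_from_file "xcl1.cluster" with
  | Some (n, m, dir) -> Printf.printf "cluster %d of %d: %s\n" n m dir
  | None -> prerr_endline "no cluster file"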

@ -25,42 +25,33 @@ let pp_prolog fmt clusters =
Sys.executable_name
(Escape.escape_map (fun c -> if c = '#' then Some "\\#" else None) !Config.results_dir);
F.fprintf fmt "OBJECTS=";
let filter cl = match cl with
[ce] ->
begin
match Cluster.get_ondemand_info ce with
| Some source_dir ->
let fname = DB.source_dir_to_string source_dir in
let in_ondemand_config =
match Ondemand.read_dirs_to_analyze () with
| None ->
None
| Some set ->
Some (StringSet.mem fname set) in
let check_modified () =
let modified =
DB.file_was_updated_after_start (DB.filename_from_string fname) in
if modified &&
!Config.developer_mode
then L.stdout "Modified: %s@." fname;
modified in
begin
match in_ondemand_config with
| Some b -> (* ondemand config file is specified *)
b
| None when !Config.reactive_mode ->
check_modified ()
| None ->
true
end
| None ->
true
end
| _ ->
true in
let filter source_dir =
let fname = DB.source_dir_to_string source_dir in
let in_ondemand_config =
match Ondemand.read_dirs_to_analyze () with
| None ->
None
| Some set ->
Some (StringSet.mem fname set) in
let check_modified () =
let modified =
DB.file_was_updated_after_start (DB.filename_from_string fname) in
if modified &&
!Config.developer_mode
then L.stdout "Modified: %s@." fname;
modified in
begin
match in_ondemand_config with
| Some b -> (* ondemand config file is specified *)
b
| None when !Config.reactive_mode ->
check_modified ()
| None ->
true
end in
IList.iteri
(fun i cl ->
if filter cl then F.fprintf fmt "%a " Cluster.pp_cl (i+1))
if filter cl then F.fprintf fmt "%a " Cluster.pp_cluster_name (i+1))
clusters;
F.fprintf fmt "@.@.default: test@.@.all: test@.@.";
F.fprintf fmt "test: $(OBJECTS)@.";
@ -70,46 +61,17 @@ let pp_epilog fmt () =
F.fprintf fmt "@.clean:@.\trm -f $(OBJECTS)@."
let create_cluster_makefile_and_exit
(clusters: Cluster.t list) (file_cg: Cg.t) (fname: string) (print_files: bool) =
(clusters: Cluster.t list) (fname: string) =
let outc = open_out fname in
let fmt = Format.formatter_of_out_channel outc in
let file_to_cluster = ref DB.SourceFileMap.empty in
let cluster_nr = ref 0 in
let tot_clusters_nr = IList.length clusters in
let do_cluster cluster =
incr cluster_nr;
let dependent_clusters = ref IntSet.empty in
let add_dependent file_as_pname =
let source_file = source_file_from_pname file_as_pname in
try
let num = DB.SourceFileMap.find source_file !file_to_cluster in
if num < !cluster_nr then
dependent_clusters := IntSet.add num !dependent_clusters
with Not_found ->
F.fprintf fmt "#[%a] missing dependency to %s@."
Cluster.pp_cl !cluster_nr
(DB.source_file_to_string source_file) in
let do_file ce = match Cluster.get_ondemand_info ce with
| Some source_dir ->
(* add comment to makefile to correlate source file and cluster number. *)
let pname_str = match ce.Cluster.ce_active_procs with
| [pname] -> Procname.to_string pname
| _ -> "" in
F.fprintf fmt "#%s %s@\n" (DB.source_dir_to_string source_dir) pname_str
| None ->
let source_file = ce.Cluster.ce_file in
let children =
try Cg.get_defined_children file_cg (source_file_to_pname source_file) with
| Not_found -> Procname.Set.empty in
Procname.Set.iter add_dependent children;
file_to_cluster :=
DB.SourceFileMap.add source_file !cluster_nr !file_to_cluster;
() (* L.err "file %s has %d children@." file (StringSet.cardinal children) *) in
IList.iter do_file cluster;
Cluster.pp_cluster_dependency
!cluster_nr tot_clusters_nr cluster print_files fmt (IntSet.elements !dependent_clusters);
(* L.err "cluster %d has %d dependencies@."
!cluster_nr (IntSet.cardinal !dependent_clusters) *) in
let do_file source_dir =
F.fprintf fmt "#%s@\n" (DB.source_dir_to_string source_dir) in
do_file cluster;
Cluster.pp_cluster !cluster_nr tot_clusters_nr cluster fmt () in
pp_prolog fmt clusters;
IList.iter do_cluster clusters;
pp_epilog fmt ();

@ -184,10 +184,6 @@ let eradicate = ref false
(** should the checkers be run? *)
let checkers_enabled () = not !eradicate
(** flag for ondemand mode:
procedure calls trigger a new analysis if the summary is not present already *)
let ondemand_enabled = ref false
(** flag for reactive mode:
the analysis starts from the files captured since the "infer" command started *)
let reactive_mode = ref false
@ -204,9 +200,6 @@ let idempotent_getters = ref true
(** if true, changes to code are checked at the procedure level; if false, at the file level *)
let incremental_procs = ref true
(** Flag to activate intraprocedural-only analysis *)
let intraprocedural = ref false
(** if active, join x+j and x+k for constants j and k *)
let join_plus = ref true
@ -227,9 +220,6 @@ let long_static_proc_names = ref false
(** Number of lines of code in original file *)
let nLOC = ref 0
(** max number of procedures in each cluster *)
let max_cluster_size = ref 2000
(** Maximum level of recursion during the analysis, after which a timeout is generated *)
let max_recursion = ref 5

@ -49,8 +49,6 @@ let new_file_data source nLOC cg_fname =
type t =
{ cg: Cg.t; (** global call graph *)
proc_map: file_data Procname.Hash.t; (** map from procedure name to file data *)
mutable active_opt : Procname.Set.t option; (** if not None, restrict the active procedures to the given set *)
mutable procs_defined_in_several_files : Procname.Set.t; (** Procedures defined in more than one file *)
mutable source_files : DB.SourceFileSet.t; (** Source files in the execution environment *)
}
@ -61,37 +59,20 @@ type initial = t
let freeze exe_env = exe_env (* TODO: unclear what this function is used for *)
(** create a new execution environment *)
let create procset_opt =
let create () =
{ cg = Cg.create ();
proc_map = Procname.Hash.create 17;
active_opt = procset_opt;
procs_defined_in_several_files = Procname.Set.empty;
source_files = DB.SourceFileSet.empty;
}
(** check if a procedure is marked as active *)
let proc_is_active exe_env proc_name =
match exe_env.active_opt with
| None -> true
| Some procset -> Procname.Set.mem proc_name procset
(** add a procedure to the set of active procedures *)
let add_active_proc exe_env proc_name =
match exe_env.active_opt with
| None -> ()
| Some procset -> exe_env.active_opt <- Some (Procname.Set.add proc_name procset)
(** add call graph from fname in the spec db,
with relative tenv and cfg, to the execution environment *)
let add_cg (exe_env: t) (source_dir : DB.source_dir) =
let cg_fname = DB.source_dir_get_internal_file source_dir ".cg" in
let cg_opt = match Cg.load_from_file cg_fname with
| None -> (L.stderr "cannot load %s@." (DB.filename_to_string cg_fname); None)
| Some cg ->
Cg.restrict_defined cg exe_env.active_opt;
Some cg in
match cg_opt with
| None -> None
match Cg.load_from_file cg_fname with
| None ->
L.stderr "cannot load %s@." (DB.filename_to_string cg_fname);
None
| Some cg ->
let source = Cg.get_source cg in
exe_env.source_files <- DB.SourceFileSet.add source exe_env.source_files;
@ -102,23 +83,17 @@ let add_cg (exe_env: t) (source_dir : DB.source_dir) =
IList.iter (fun pname ->
let should_update =
if Procname.Hash.mem exe_env.proc_map pname then
let old_source = (Procname.Hash.find exe_env.proc_map pname).source in
exe_env.procs_defined_in_several_files <-
Procname.Set.add pname exe_env.procs_defined_in_several_files;
(* L.err "Warning: procedure %a is defined in both %s and %s@."*)
(* Procname.pp pname (DB.source_file_to_string source)*)
(* (DB.source_file_to_string old_source); *)
source < old_source (* when a procedure is defined in several files,
map to the first alphabetically *)
let old_source =
(Procname.Hash.find exe_env.proc_map pname).source in
(* when a procedure is defined in several files, *)
(* map to the first alphabetically *)
source < old_source
else true in
if should_update
then Procname.Hash.replace exe_env.proc_map pname file_data) defined_procs;
then Procname.Hash.replace exe_env.proc_map pname file_data)
defined_procs;
Some cg
(** get the procedures defined in more than one file *)
let get_procs_defined_in_several_files exe_env =
exe_env.procs_defined_in_several_files
(** get the global call graph *)
let get_cg exe_env =
exe_env.cg

@ -19,8 +19,8 @@ type t
(** freeze the execution environment, so it can be queried *)
val freeze : initial -> t
(** create a new execution environment, given an optional set restricting the active procedures *)
val create : Procname.Set.t option -> initial
(** create a new execution environment *)
val create : unit -> initial
(** add call graph from the source dir in the spec db,
with relative tenv and cfg, to the execution environment *)
@ -29,9 +29,6 @@ val add_cg : initial -> DB.source_dir -> Cg.t option
(** get the global call graph *)
val get_cg : t -> Cg.t
(** get the procedures defined in more than one file *)
val get_procs_defined_in_several_files : t -> Procname.Set.t
(** return the source file associated to the procedure *)
val get_source : t -> Procname.t -> DB.source_file
@ -43,9 +40,3 @@ val get_cfg : t -> Procname.t -> Cfg.cfg
(** [iter_files f exe_env] applies [f] to the source file and tenv and cfg for each file in [exe_env] *)
val iter_files : (DB.source_file -> Cfg.cfg -> unit) -> t -> unit
(** check if a procedure is marked as active *)
val proc_is_active : t -> Procname.t -> bool
(** add a procedure to the set of active procedures *)
val add_active_proc : t -> Procname.t -> unit

@ -1,351 +0,0 @@
(*
* Copyright (c) 2009 - 2013 Monoidics ltd.
* Copyright (c) 2013 - present Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*)
module L = Logging
module F = Format
(** Procedure name with weight given by the number of in-calls.
Used to form a set ordered by weight. *)
module WeightedPname =
struct
type t = (Procname.t * Cg.in_out_calls)
let compare (pn1, calls1) (pn2, calls2) =
let n = int_compare calls1.Cg.in_calls calls2.Cg.in_calls in
if n != 0 then n
else Procname.compare pn1 pn2
end
let weighted_pname_from_cg cg pname =
(pname, Cg.get_calls cg pname)
module WeightedPnameSet =
Set.Make(WeightedPname)
let pp_weightedpnameset fmt set =
let f (pname, _) = F.fprintf fmt "%a@ " Procname.pp pname in
WeightedPnameSet.iter f set
let compute_weighed_pnameset cg =
let set = ref WeightedPnameSet.empty in
let add_pname_calls (pn, _) =
set := WeightedPnameSet.add (weighted_pname_from_cg cg pn) !set in
IList.iter add_pname_calls (Cg.get_nodes_and_calls cg);
!set
(* Return true if there are no children of [pname] whose specs
have changed since [pname] was last analyzed. *)
let proc_is_up_to_date cg pname =
match Specs.get_summary pname with
| None -> false
| Some summary ->
let filter dependent_proc =
try
Specs.get_timestamp summary =
Procname.Map.find dependent_proc summary.Specs.dependency_map
with Not_found -> (* can happen in on-demand *)
true in
Procname.Set.for_all filter (Cg.get_defined_children cg pname)
(** Return the list of procedures which should perform a phase
transition from [FOOTPRINT] to [RE_EXECUTION] *)
let should_perform_transition cg proc_name : Procname.t list =
let recursive_dependents =
if !Config.ondemand_enabled then Procname.Set.empty
else Cg.get_recursive_dependents cg proc_name in
let recursive_dependents_plus_self = Procname.Set.add proc_name recursive_dependents in
let should_transition =
Specs.get_phase proc_name == Specs.FOOTPRINT &&
Procname.Set.for_all (proc_is_up_to_date cg) recursive_dependents in
if should_transition then Procname.Set.elements recursive_dependents_plus_self
else []
(** Perform the transition from [FOOTPRINT] to [RE_EXECUTION] in spec table *)
let transition_footprint_re_exe proc_name joined_pres =
L.out "Transition %a from footprint to re-exe@." Procname.pp proc_name;
let summary = Specs.get_summary_unsafe "transition_footprint_re_exe" proc_name in
let summary' =
if !Config.only_footprint then
{ summary with
Specs.phase = Specs.RE_EXECUTION;
}
else
let specs =
IList.map
(fun jp ->
Specs.spec_normalize
{ Specs.pre = jp;
posts = [];
visited = Specs.Visitedset.empty })
joined_pres in
let payload =
{ summary.Specs.payload with
Specs.preposts = Some specs; } in
let dependency_map =
Specs.re_initialize_dependency_map summary.Specs.dependency_map in
{ summary with
Specs.timestamp = 0;
phase = Specs.RE_EXECUTION;
dependency_map;
payload;
} in
Specs.add_summary proc_name summary'
module SpecMap = Map.Make (struct
type t = Prop.normal Specs.Jprop.t
let compare = Specs.Jprop.compare
end)
(** Update the specs of the current proc after the execution of one phase *)
let update_specs proc_name phase (new_specs : Specs.NormSpec.t list)
: Specs.NormSpec.t list * bool =
let new_specs = Specs.normalized_specs_to_specs new_specs in
let old_specs = Specs.get_specs proc_name in
let changed = ref false in
let current_specs =
ref
(IList.fold_left
(fun map spec ->
SpecMap.add
spec.Specs.pre
(Paths.PathSet.from_renamed_list spec.Specs.posts, spec.Specs.visited) map)
SpecMap.empty old_specs) in
let re_exe_filter old_spec = (* filter out pres which failed re-exe *)
if phase == Specs.RE_EXECUTION &&
not (IList.exists
(fun new_spec -> Specs.Jprop.equal new_spec.Specs.pre old_spec.Specs.pre)
new_specs)
then begin
changed:= true;
L.out "Specs changed: removing pre of spec@\n%a@." (Specs.pp_spec pe_text None) old_spec;
current_specs := SpecMap.remove old_spec.Specs.pre !current_specs end
else () in
let add_spec spec = (* add a new spec by doing union of the posts *)
try
let old_post, old_visited = SpecMap.find spec.Specs.pre !current_specs in
let new_post, new_visited =
Paths.PathSet.union
old_post
(Paths.PathSet.from_renamed_list spec.Specs.posts),
Specs.Visitedset.union old_visited spec.Specs.visited in
if not (Paths.PathSet.equal old_post new_post) then begin
changed := true;
L.out "Specs changed: added new post@\n%a@."
(Propset.pp pe_text (Specs.Jprop.to_prop spec.Specs.pre))
(Paths.PathSet.to_propset new_post);
current_specs :=
SpecMap.add spec.Specs.pre (new_post, new_visited)
(SpecMap.remove spec.Specs.pre !current_specs) end
with Not_found ->
changed := true;
L.out "Specs changed: added new pre@\n%a@." (Specs.Jprop.pp_short pe_text) spec.Specs.pre;
current_specs :=
SpecMap.add
spec.Specs.pre
((Paths.PathSet.from_renamed_list spec.Specs.posts), spec.Specs.visited)
!current_specs in
let res = ref [] in
let convert pre (post_set, visited) =
res :=
Specs.spec_normalize
{ Specs.pre = pre;
Specs.posts = Paths.PathSet.elements post_set;
Specs.visited = visited }:: !res in
IList.iter re_exe_filter old_specs; (* filter out pre's which failed re-exe *)
IList.iter add_spec new_specs; (* add new specs *)
SpecMap.iter convert !current_specs;
!res,!changed
module GlobalState =
struct
(** Total number of procedures to analyze *)
let tot_procs =
ref 0
(** Number of procedures done *)
let num_procs_done =
ref 0
(** Weighted pnames (procedure names with weight) to do *)
let wpnames_todo =
ref WeightedPnameSet.empty
end
module G = GlobalState
(** Return true if [pname] is done and requires no further analysis *)
let proc_is_done cg pname =
not (WeightedPnameSet.mem (weighted_pname_from_cg cg pname) !G.wpnames_todo)
(** flag to activate tracing of the algorithm *)
let trace = ref false
(** Return true if [pname] has just become done *)
let procs_become_done cg pname : Procname.t list =
let recursive_dependents = Cg.get_recursive_dependents cg pname in
let nonrecursive_dependents = Cg.get_nonrecursive_dependents cg pname in
let summary = Specs.get_summary_unsafe "procs_become_done" pname in
let is_done = Specs.get_timestamp summary <> 0 &&
(!Config.only_footprint || Specs.get_phase pname == Specs.RE_EXECUTION) &&
Procname.Set.for_all (proc_is_done cg) nonrecursive_dependents &&
Procname.Set.for_all (proc_is_up_to_date cg) recursive_dependents in
if !trace then L.err "proc is%s done@." (if is_done then "" else " not");
if is_done
then
let procs_to_remove =
(* the proc itself if not recursive, otherwise all the recursive circle *)
Procname.Set.add pname recursive_dependents in
Procname.Set.elements procs_to_remove
else []
let post_process_procs exe_env procs_done =
let check_no_specs pn =
if Specs.get_specs pn = [] then begin
Errdesc.warning_err
(Specs.get_summary_unsafe "post_process_procs" pn).Specs.attributes.ProcAttributes.loc
"No specs found for %a@." Procname.pp pn
end in
let cg = Exe_env.get_cg exe_env in
IList.iter (fun pn ->
let elem = weighted_pname_from_cg cg pn in
if WeightedPnameSet.mem elem !G.wpnames_todo then
begin
incr G.num_procs_done;
G.wpnames_todo := WeightedPnameSet.remove elem !G.wpnames_todo;
let whole_seconds = false in
check_no_specs pn;
Printer.proc_write_log whole_seconds (Exe_env.get_cfg exe_env pn) pn
end
) procs_done
(** Find the max string in the [set] which satisfies [filter], and count the number of attempts.
Precedence is given to strings in [priority_set] *)
let filter_max exe_env filter set priority_set =
let rec find_max n filter set =
let elem = WeightedPnameSet.max_elt set in
if filter elem then
begin
let proc_name = fst elem in
Config.footprint := Specs.get_phase proc_name = Specs.FOOTPRINT;
let file_name = Exe_env.get_source exe_env proc_name in
let action = if !Config.footprint then "Discovering" else "Verifying" in
L.err "@\n**%s specs: %a file: %s@\n"
action Procname.pp proc_name (DB.source_file_to_string file_name);
L.err " %d/%d procedures done@."
!G.num_procs_done !G.tot_procs;
elem
end
else
begin
find_max (n + 1) filter (WeightedPnameSet.remove elem set)
end in
try
(* try with priority elements first *)
find_max 1 filter (WeightedPnameSet.inter set priority_set)
with Not_found -> find_max 1 filter set
(** Main algorithm responsible for driving the analysis of an Exe_env (set of procedures).
The algorithm computes dependencies between procedures,
propagates results, and handles fixpoints in the call graph. *)
let main_algorithm exe_env analyze_proc process_result : unit =
let call_graph = Exe_env.get_cg exe_env in
let filter_initial (pname, _) =
let summary = Specs.get_summary_unsafe "main_algorithm" pname in
Specs.get_timestamp summary = 0 in
G.wpnames_todo :=
WeightedPnameSet.filter filter_initial (compute_weighed_pnameset call_graph);
let wpnames_address_of = (* subset of the procedures whose address is taken *)
let address_taken_of n =
Procname.Set.mem n (Cfg.get_priority_procnames (Exe_env.get_cfg exe_env n)) in
WeightedPnameSet.filter (fun (n, _) -> address_taken_of n) !G.wpnames_todo in
G.tot_procs := WeightedPnameSet.cardinal !G.wpnames_todo;
G.num_procs_done := 0;
let wpname_can_be_analyzed (pname, _) : bool =
(* Return true if [pname] is not up to date and it can be analyzed right now *)
Procname.Set.for_all
(proc_is_done call_graph) (Cg.get_nonrecursive_dependents call_graph pname) &&
(Specs.get_timestamp (Specs.get_summary_unsafe "main_algorithm" pname) = 0
|| not (proc_is_up_to_date call_graph pname)) in
let process_one_proc ((pname, _) as elem) =
DB.current_source :=
(Specs.get_summary_unsafe "main_algorithm" pname)
.Specs.attributes.ProcAttributes.loc.Location.file;
if !trace then
begin
let whole_seconds = false in
L.err "@[<v 3> *********** Summary of %a ***********@," Procname.pp pname;
L.err "%a@]@\n"
(Specs.pp_summary pe_text whole_seconds)
(Specs.get_summary_unsafe "main_algorithm" pname)
end;
Specs.update_dependency_map pname;
process_result exe_env elem (analyze_proc exe_env pname) in
while not (WeightedPnameSet.is_empty !G.wpnames_todo) do
begin
if !trace then begin
let analyzable_pnames = WeightedPnameSet.filter wpname_can_be_analyzed !G.wpnames_todo in
L.err "Nodes todo: %a@\n" pp_weightedpnameset !G.wpnames_todo;
L.err "Analyzable procs: %a@\n" pp_weightedpnameset analyzable_pnames
end;
try
let elem =
(** find max analyzable proc *)
filter_max exe_env wpname_can_be_analyzed !G.wpnames_todo wpnames_address_of in
process_one_proc elem
with Not_found -> (* no analyzable procs *)
L.err "Error: can't analyze any procs. Printing current spec table@\n@[<v>%a@]@."
(Specs.pp_spec_table pe_text false) ();
if !Config.ondemand_enabled then G.wpnames_todo := WeightedPnameSet.empty
else raise (Failure "Stopping")
end
done
type analyze_proc = Exe_env.t -> Procname.t -> Specs.summary
type process_result = Exe_env.t -> WeightedPname.t -> Specs.summary -> unit
(** Execute [analyze_proc] respecting dependencies between procedures,
and apply [process_result] to the result of the analysis. *)
let interprocedural_algorithm
(exe_env: Exe_env.t)
(_analyze_proc: analyze_proc)
(_process_result: process_result)
: unit =
let analyze_proc exe_env pname = (* wrap _analyze_proc and handle exceptions *)
let log_error_and_continue exn kind =
Reporting.log_error pname exn;
let prev_summary = Specs.get_summary_unsafe "interprocedural_algorithm" pname in
let timestamp = max 1 (prev_summary.Specs.timestamp) in
let stats = { prev_summary.Specs.stats with Specs.stats_failure = Some kind } in
let payload =
{ prev_summary.Specs.payload with Specs.preposts = Some []; } in
{ prev_summary with Specs.stats; payload; timestamp; } in
try _analyze_proc exe_env pname with
| Analysis_failure_exe kind as exn ->
(* in production mode, log the timeout/crash and continue with the summary we had before
the failure occurred *)
log_error_and_continue exn kind
| exn ->
(* this happens due to exceptions from assert false or some other unrecognized exception *)
log_error_and_continue exn (FKcrash (Printexc.to_string exn)) in
let process_result exe_env (pname, calls) summary =
(* wrap _process_result and handle exceptions *)
try _process_result exe_env (pname, calls) summary with
| exn ->
let err_name, _, _, _, _, _, _ = Exceptions.recognize_exception exn in
let err_str = "process_result raised " ^ (Localise.to_string err_name) in
L.err "Error: %s@." err_str;
let exn' = Exceptions.Internal_error (Localise.verbatim_desc err_str) in
Reporting.log_error pname exn';
post_process_procs exe_env [pname] in
main_algorithm exe_env analyze_proc process_result

@ -1,34 +0,0 @@
(*
* Copyright (c) 2009 - 2013 Monoidics ltd.
* Copyright (c) 2013 - present Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*)
(** Implementation of the Interprocedural Footprint Analysis Algorithm *)
val procs_become_done : Cg.t -> Procname.t -> Procname.t list
val post_process_procs : Exe_env.t -> Procname.t list -> unit
(** Return the list of procedures which should perform a phase
transition from [FOOTPRINT] to [RE_EXECUTION] *)
val should_perform_transition : Cg.t -> Procname.t -> Procname.t list
(** Perform the transition from [FOOTPRINT] to [RE_EXECUTION] in spec table *)
val transition_footprint_re_exe : Procname.t -> Prop.normal Specs.Jprop.t list -> unit
(** Update the specs of the current proc after the execution of one phase *)
val update_specs :
Procname.t -> Specs.phase -> Specs.NormSpec.t list -> Specs.NormSpec.t list * bool
type analyze_proc = Exe_env.t -> Procname.t -> Specs.summary
type process_result = Exe_env.t -> (Procname.t * Cg.in_out_calls) -> Specs.summary -> unit
(** Execute [analyze_proc] respecting dependencies between procedures,
and apply [process_result] to the result of the analysis. *)
val interprocedural_algorithm : Exe_env.t -> analyze_proc -> process_result -> unit

@ -42,24 +42,12 @@ module Codegen = struct
exit 0
end
(** if true, save file dependency graph to disk *)
let save_file_dependency = ref false
(** command line option: if true, simulate the analysis only *)
let simulate = ref false
(** command line option: if true, run the analysis in checker mode *)
let checkers = ref false
(** command line option: name of the makefile to create with clusters and dependencies *)
let makefile_cmdline = ref ""
(** Command line option: only consider procedures whose name begins with the given string. *)
let select_proc = ref None
(** if not empty, only analyze the files in the list provided from the command-line *)
let only_files_cmdline = ref []
(** optional command-line name of the .cluster file *)
let cluster_cmdline = ref None
@ -119,7 +107,7 @@ let arg_desc =
"print the builtin functions and exit"
;
"-reactive",
Arg.Unit (fun () -> Config.ondemand_enabled := true; Config.reactive_mode := true),
Arg.Unit (fun () -> Config.reactive_mode := true),
None,
"analyze in reactive propagation mode starting from changed files"
;
@ -173,7 +161,7 @@ let arg_desc =
The analysis ignores errors caused by unknown procedure calls."
;
"-checkers",
Arg.Unit (fun () -> checkers := true; Config.intraprocedural := true),
Arg.Unit (fun () -> checkers := true),
None,
" run only the checkers instead of the full analysis"
;
@ -190,41 +178,20 @@ let arg_desc =
"-eradicate",
Arg.Unit (fun () ->
Config.eradicate := true;
checkers := true;
Config.intraprocedural := true),
checkers := true),
None,
" activate the eradicate checker for java annotations"
;
"-file",
Arg.String (fun s -> only_files_cmdline := s :: !only_files_cmdline),
Some "fname",
"specify one file to be analyzed (without path); the option can be repeated"
;
"-intraprocedural",
Arg.Set Config.intraprocedural,
None,
"perform an intraprocedural analysis only"
;
"-makefile",
Arg.Set_string makefile_cmdline,
Some "file",
"create a makefile to perform the analysis"
;
"-max_cluster",
Arg.Set_int Config.max_cluster_size,
Some "n",
"set the max number of procedures in each cluster (default n=2000)"
;
"-seconds_per_iteration",
Arg.Set_float seconds_per_iteration,
Some "n",
"set the number of seconds per iteration (default n=30)"
;
"-simulate",
Arg.Set simulate,
None,
" run a simulation of the analysis only"
;
"-subtype_multirange",
Arg.Set Config.subtype_multirange,
None,
@ -235,11 +202,6 @@ let arg_desc =
None,
"allow cast of undefined values"
;
"-select_proc",
Arg.String (fun s -> select_proc := Some s),
Some "string",
"only consider procedures whose name contains the given string"
;
"-symops_per_iteration",
Arg.Set_int symops_per_iteration,
Some "n",
@ -291,44 +253,7 @@ let () = (* parse command-line arguments *)
print_usage_exit ()
end
module Simulator = struct (** Simulate the analysis only *)
let reset_summaries cg =
IList.iter
(fun (pname, _) -> Specs.reset_summary cg pname None)
(Cg.get_nodes_and_calls cg)
(** Perform phase transition from [FOOTPRINT] to [RE_EXECUTION] for
the procedures enabled after the analysis of [proc_name] *)
let perform_transition exe_env proc_name =
let proc_names = Fork.should_perform_transition (Exe_env.get_cg exe_env) proc_name in
let f proc_name =
let joined_pres = [] in
Fork.transition_footprint_re_exe proc_name joined_pres in
IList.iter f proc_names
let process_result
(exe_env: Exe_env.t)
((proc_name: Procname.t), (calls: Cg.in_out_calls))
(_summ: Specs.summary) : unit =
L.err "in process_result %a@." Procname.pp proc_name;
let summ =
{ _summ with
Specs.stats = { _summ.Specs.stats with Specs.stats_calls = calls }} in
Specs.add_summary proc_name summ;
perform_transition exe_env proc_name;
let procs_done = Fork.procs_become_done (Exe_env.get_cg exe_env) proc_name in
Fork.post_process_procs exe_env procs_done
let analyze_proc _ proc_name =
L.err "in analyze_proc %a@." Procname.pp proc_name;
(* for i = 1 to Random.int 1000000 do () done; *)
let prev_summary = Specs.get_summary_unsafe "Simulator" proc_name in
let timestamp = max 1 (prev_summary.Specs.timestamp) in
{ prev_summary with Specs.timestamp = timestamp }
end
let analyze exe_env =
let analyze_exe_env exe_env =
let init_time = Unix.gettimeofday () in
L.log_progress_simple ".";
Specs.clear_spec_tbl ();
@ -339,16 +264,8 @@ let analyze exe_env =
let call_graph = Exe_env.get_cg exe_env in
Callbacks.iterate_callbacks Checkers.ST.store_summary call_graph exe_env
end
else if !simulate then (* simulate the analysis *)
begin
Simulator.reset_summaries (Exe_env.get_cg exe_env);
Fork.interprocedural_algorithm
exe_env
Simulator.analyze_proc
Simulator.process_result
end
else (* full analysis *)
begin
else
begin (* run the full analysis *)
Interproc.do_analysis exe_env;
Printer.c_files_write_html line_reader exe_env;
Interproc.print_stats exe_env;
@ -356,67 +273,6 @@ let analyze exe_env =
L.out "Interprocedural footprint analysis terminated in %f sec@." elapsed
end
(** add [x] to list [l] at position [nth] *)
let list_add_nth x l nth =
let rec add acc todo nth =
if nth = 0 then IList.rev_append acc (x:: todo)
else match todo with
| [] -> raise Not_found
| y:: todo' -> add (y:: acc) todo' (nth - 1) in
add [] l nth
(** sort a list weakly w.r.t. a compare function which does not have to be a total order;
the number returned by [compare x y] indicates 'how strongly' x should come before y *)
let weak_sort compare list =
let weak_add l x =
let length = IList.length l in
let fitness = Array.make (length + 1) 0 in
IList.iter (fun y -> fitness.(0) <- fitness.(0) + compare x y) l;
let best_position = ref 0 in
let best_value = ref (fitness.(0)) in
let i = ref 0 in
IList.iter (fun y ->
incr i;
let new_value = fitness.(!i - 1) - (compare x y) + (compare y x) in
fitness.(!i) <- new_value;
if new_value < !best_value then
begin
best_value := new_value;
best_position := !i
end)
l;
list_add_nth x l !best_position in
IList.fold_left weak_add [] list
let pp_stringlist fmt slist =
IList.iter (fun pname -> F.fprintf fmt "%s " pname) slist
let weak_sort_nodes cg =
let nodes = Cg.get_defined_nodes cg in
let grand_hash = Procname.Hash.create 1 in
let get_grandparents x =
try Procname.Hash.find grand_hash x with
| Not_found ->
let res = ref Procname.Set.empty in
let do_parent p = res := Procname.Set.union (Cg.get_parents cg p) !res in
Procname.Set.iter do_parent (Cg.get_parents cg x);
Procname.Hash.replace grand_hash x !res;
!res in
let cmp x y =
let res = ref 0 in
if Procname.Set.mem y (Cg.get_parents cg x) then res := !res - 2;
if Procname.Set.mem y (get_grandparents x) then res := !res - 1;
if Procname.Set.mem x (Cg.get_parents cg y) then res := !res + 2;
if Procname.Set.mem x (get_grandparents y) then res := !res + 1;
!res in
weak_sort cmp nodes
let file_pname_to_cg file_pname =
let source_file = ClusterMakefile.source_file_from_pname file_pname in
let source_dir = DB.source_dir_from_source_file source_file in
let cg_fname = DB.source_dir_get_internal_file source_dir ".cg" in
Cg.load_from_file cg_fname
let output_json_file_stats num_files num_procs num_lines =
let file_stats =
`Assoc [ ("files", `Int num_files);
@ -426,195 +282,11 @@ let output_json_file_stats num_files num_procs num_lines =
let f = open_out (Filename.concat !Config.results_dir Config.proc_stats_filename) in
Yojson.Basic.pretty_to_channel f file_stats
(** create clusters of minimal size in the dependence order,
with recursive parts grouped together. *)
let create_minimal_clusters file_cg exe_env to_analyze_map : Cluster.t list =
if !Cluster.trace_clusters then L.err "[create_minimal_clusters]@.";
let sorted_files = weak_sort_nodes file_cg in
let seen = ref Procname.Set.empty in
let clusters = ref [] in
let total_files = ref 0 in
let total_procs = ref 0 in
let total_LOC = ref 0 in
let total = Procname.Map.cardinal to_analyze_map in
let analyzed_map_done = ref 0 in
let create_cluster_elem (file_pname, changed_procs) = (* create a Cluster.elem for the file *)
L.log_progress "Creating clusters..." analyzed_map_done total;
let source_file = ClusterMakefile.source_file_from_pname file_pname in
if !Cluster.trace_clusters then
L.err " [create_minimal_clusters] %s@." (DB.source_file_to_string source_file);
DB.current_source := source_file;
match file_pname_to_cg file_pname with
| None ->
Cluster.create_bottomup source_file 0 []
| Some cg ->
(* decide whether a proc is active using pname_to_fname, i.e. whether this is the file associated to it *)
let proc_is_selected pname = match !select_proc with
| None -> true
| Some pattern_str -> string_is_prefix pattern_str (Procname.to_unique_id pname) in
let proc_is_active pname =
proc_is_selected pname &&
DB.source_file_equal (Exe_env.get_source exe_env pname) source_file in
let active_procs = IList.filter proc_is_active (Procname.Set.elements changed_procs) in
let naprocs = IList.length active_procs in
total_files := !total_files + 1;
total_procs := !total_procs + naprocs;
total_LOC := !total_LOC + (Cg.get_nLOC cg);
Cluster.create_bottomup source_file naprocs active_procs in
let choose_next_file list = (* choose next file from the weakly ordered list *)
let file_has_no_unseen_dependents fname =
Procname.Set.subset (Cg.get_dependents file_cg fname) !seen in
match IList.partition file_has_no_unseen_dependents list with
| (fname :: no_deps), deps -> (* if all the dependents of fname have been seen, bypass the order in the list *)
if !Cluster.trace_clusters then
L.err " [choose_next_file] %s (NO dependents)@." (Procname.to_string fname);
Some (fname, IList.rev_append no_deps deps)
| [], _ ->
begin
match list with
| fname :: list' ->
if !Cluster.trace_clusters then
L.err " [choose_next_file] %s (HAS dependents)@." (Procname.to_string fname);
Some(fname, list')
| [] -> None
end in
let rec build_clusters list = match choose_next_file list with
| None -> ()
| Some (fname, list') ->
if !Cluster.trace_clusters then
L.err " [build_clusters] %s@." (Procname.to_string fname);
if Procname.Set.mem fname !seen then build_clusters list'
else
let cluster_set = Procname.Set.add fname (Cg.get_recursive_dependents file_cg fname) in
let cluster, list'' = IList.partition (fun node -> Procname.Set.mem node cluster_set) list in
seen := Procname.Set.union !seen cluster_set;
let to_analyze =
IList.fold_right
(fun file_pname l ->
try (file_pname, Procname.Map.find file_pname to_analyze_map) :: l
with Not_found -> l)
cluster
[] in
if to_analyze <> [] then
begin
let cluster = IList.map create_cluster_elem to_analyze in
clusters := cluster :: !clusters;
end;
build_clusters list'' in
build_clusters sorted_files;
output_json_file_stats !total_files !total_procs !total_LOC;
IList.rev !clusters
let proc_list_to_set proc_list =
IList.fold_left (fun s p -> Procname.Set.add p s) Procname.Set.empty proc_list
(** compute the clusters *)
let compute_clusters exe_env : Cluster.t list =
if !Cluster.trace_clusters then
L.err "[compute_clusters] @.";
let file_cg = Cg.create () in
let global_cg = Exe_env.get_cg exe_env in
let nodes, edges = Cg.get_nodes_and_edges global_cg in
let defined_procs = Cg.get_defined_nodes global_cg in
let total_nodes = IList.length nodes in
let computed_nodes = ref 0 in
let do_node (n, defined, _) =
L.log_progress "Computing dependencies..." computed_nodes total_nodes;
if defined then
Cg.add_defined_node file_cg
(ClusterMakefile.source_file_to_pname (Exe_env.get_source exe_env n)) in
let do_edge (n1, n2) =
if Cg.node_defined global_cg n1 && Cg.node_defined global_cg n2 then
begin
let src1 = Exe_env.get_source exe_env n1 in
let src2 = Exe_env.get_source exe_env n2 in
if not (DB.source_file_equal src1 src2) then begin
if !Cluster.trace_clusters then
L.err "file_cg %s -> %s [%a]@."
(DB.source_file_to_string src1)
(DB.source_file_to_string src2)
Procname.pp n2;
Cg.add_edge file_cg
(ClusterMakefile.source_file_to_pname src1)
(ClusterMakefile.source_file_to_pname src2)
end
end in
IList.iter do_node nodes;
if IList.length nodes > 0 then L.log_progress_simple "\n";
if not !Config.intraprocedural then IList.iter do_edge edges;
if !save_file_dependency then
Cg.save_call_graph_dotty (Some (DB.filename_from_string "file_dependency.dot")) Specs.get_specs file_cg;
let files = Cg.get_defined_nodes file_cg in
let num_files = IList.length files in
L.err "@.Found %d defined procedures in %d files.@." (IList.length defined_procs) num_files;
let to_analyze_map =
(* get all procedures defined in a file *)
let get_defined_procs file_pname = match file_pname_to_cg file_pname with
| None -> Procname.Set.empty
| Some cg -> proc_list_to_set (Cg.get_defined_nodes cg) in
IList.fold_left
(fun m file_pname -> Procname.Map.add file_pname (get_defined_procs file_pname) m)
Procname.Map.empty
files in
L.err "Analyzing %d files.@.@." (Procname.Map.cardinal to_analyze_map);
let clusters = create_minimal_clusters file_cg exe_env to_analyze_map in
L.err "Minimal clusters:@.";
Cluster.print_clusters_stats clusters;
if !makefile_cmdline <> "" then
begin
let max_cluster_size = 50 in
let desired_cluster_size = 1 in
let clusters' =
Cluster.combine_split_clusters clusters max_cluster_size desired_cluster_size in
L.err "@.Combined clusters with max size %d@." max_cluster_size;
Cluster.print_clusters_stats clusters';
let number_of_clusters = IList.length clusters' in
let plural_of_cluster = if number_of_clusters != 1 then "s" else "" in
L.log_progress_simple
(Printf.sprintf "Analyzing %d cluster%s" number_of_clusters plural_of_cluster);
ClusterMakefile.create_cluster_makefile_and_exit clusters' file_cg !makefile_cmdline false
end;
let clusters' =
Cluster.combine_split_clusters
clusters !Config.max_cluster_size !Config.max_cluster_size in
L.err "@.Combined clusters with max size %d@." !Config.max_cluster_size;
Cluster.print_clusters_stats clusters';
let number_of_clusters = IList.length clusters' in
L.log_progress_simple ("\nAnalyzing "^(string_of_int number_of_clusters)^" clusters");
clusters'
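(* Illustrative sketch, not part of this change: bounding cluster size can be
   modelled by chopping any oversized cluster into chunks of at most
   [max_size] elements (assumed > 0). The real Cluster.combine_split_clusters
   also merges small clusters, which is omitted here. *)
let rec chunks max_size = function
  | [] -> []
  | cluster ->
      let rec take n = function
        | x :: tl when n > 0 -> let taken, rest = take (n - 1) tl in (x :: taken, rest)
        | rest -> ([], rest) in
      let head, rest = take max_size cluster in
      head :: chunks max_size rest

let split_oversized max_size clusters =
  List.concat (List.map (chunks max_size) clusters)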
(** Load a .c or .cpp file into an execution environment *)
let load_cg_file (_exe_env: Exe_env.initial) (source_dir : DB.source_dir) =
match Exe_env.add_cg _exe_env source_dir with
| None -> None
| Some cg ->
L.err "loaded %s@." (DB.source_dir_to_string source_dir);
Some cg
(** Populate the exe_env by loading cg files. *)
let populate_exe_env _exe_env (source_dirs : DB.source_dir list) =
let sorted_dirs = IList.sort DB.source_dir_compare source_dirs in
IList.iter
(fun source_dir ->
ignore (load_cg_file _exe_env source_dir))
sorted_dirs;
Exe_env.freeze _exe_env
(** Create an exe_env from a cluster. *)
let exe_env_from_cluster cluster =
let _exe_env =
let active_procs_opt = Cluster.get_active_procs cluster in
Exe_env.create active_procs_opt in
let source_dirs =
let fold_cluster_elem source_dirs ce =
let source_dir =
match Cluster.get_ondemand_info ce with
| Some source_dir ->
source_dir
| None ->
DB.source_dir_from_source_file ce.Cluster.ce_file in
source_dir :: source_dirs in
IList.fold_left fold_cluster_elem [] cluster in
let _exe_env = Exe_env.create () in
let source_dirs = [cluster] in
let sorted_dirs = IList.sort DB.source_dir_compare source_dirs in
IList.iter (fun src_dir -> ignore (Exe_env.add_cg _exe_env src_dir)) sorted_dirs;
let exe_env = Exe_env.freeze _exe_env in
@ -625,12 +297,11 @@ let analyze_cluster cluster_num tot_clusters (cluster : Cluster.t) =
let cluster_num_ref = ref cluster_num in
incr cluster_num_ref;
let exe_env = exe_env_from_cluster cluster in
let num_files = IList.length cluster in
let defined_procs = Cg.get_defined_nodes (Exe_env.get_cg exe_env) in
let num_procs = IList.length defined_procs in
L.err "@.Processing cluster #%d/%d with %d files and %d procedures@."
!cluster_num_ref tot_clusters num_files num_procs;
analyze exe_env
L.err "@.Processing cluster #%d/%d with %d procedures@."
!cluster_num_ref tot_clusters num_procs;
analyze_exe_env exe_env
let process_cluster_cmdline_exit () =
match !cluster_cmdline with
@ -684,32 +355,26 @@ let teardown_logging analyzer_out_of analyzer_err_of =
close_output_file analyzer_err_of;
end
(** Compute clusters when on-demand is active.
(** Compute clusters.
Each cluster will contain only the name of the directory for a file. *)
let compute_ondemand_clusters source_dirs =
let mk_cluster source_dir =
Cluster.create_ondemand source_dir in
let clusters =
let do_source_dir acc source_dir = mk_cluster source_dir @ acc in
IList.fold_left do_source_dir [] source_dirs in
Cluster.print_clusters_stats clusters;
let compute_clusters clusters =
Cluster.print_clusters clusters;
let num_files = IList.length clusters in
let num_procs = 0 (* can't compute it at this stage *) in
let num_lines = 0 in
output_json_file_stats num_files num_procs num_lines;
if !makefile_cmdline <> "" then
begin
let file_cg = Cg.create () in
ClusterMakefile.create_cluster_makefile_and_exit clusters file_cg !makefile_cmdline false
end;
if !makefile_cmdline <> ""
then ClusterMakefile.create_cluster_makefile_and_exit clusters !makefile_cmdline;
clusters
let print_prolog () =
match !cluster_cmdline with
| None ->
L.stdout "Starting analysis (Infer version %s)@." Version.versionString;
| Some clname -> L.stdout "Cluster %s@." clname
let () =
let () =
match !cluster_cmdline with
| None ->
L.stdout "Starting analysis (Infer version %s)@." Version.versionString;
| Some clname -> L.stdout "Cluster %s@." clname in
print_prolog ();
RegisterCheckers.register ();
Facebook.register_checkers ();
@ -721,29 +386,10 @@ let () =
process_cluster_cmdline_exit ();
let source_dirs =
if !only_files_cmdline = [] then DB.find_source_dirs ()
else
let filter source_dir =
let source_dir_base = Filename.basename (DB.source_dir_to_string source_dir) in
IList.exists (fun s -> string_is_prefix s source_dir_base) !only_files_cmdline in
IList.filter filter (DB.find_source_dirs ()) in
let source_dirs = DB.find_source_dirs () in
L.err "Found %d source files in %s@." (IList.length source_dirs) !Config.results_dir;
let clusters =
if !Config.ondemand_enabled
then
compute_ondemand_clusters source_dirs
else
begin
let _exe_env = Exe_env.create None in
let exe_env =
populate_exe_env _exe_env source_dirs in
L.err "Procedures defined in more than one file: %a@."
Procname.pp_set (Exe_env.get_procs_defined_in_several_files exe_env);
compute_clusters exe_env
end in
let clusters = compute_clusters source_dirs in
let tot_clusters = IList.length clusters in
IList.iter (analyze_cluster 0 tot_clusters) clusters;

@ -1072,11 +1072,76 @@ let report_custom_errors summary =
Reporting.log_error pname ~pre: (Some (Specs.Jprop.to_prop pre)) exn in
IList.iter report (custom_error_preconditions summary)
module SpecMap = Map.Make (struct
type t = Prop.normal Specs.Jprop.t
let compare = Specs.Jprop.compare
end)
(** Update the specs of the current proc after the execution of one phase *)
let update_specs proc_name phase (new_specs : Specs.NormSpec.t list)
: Specs.NormSpec.t list * bool =
let new_specs = Specs.normalized_specs_to_specs new_specs in
let old_specs = Specs.get_specs proc_name in
let changed = ref false in
let current_specs =
ref
(IList.fold_left
(fun map spec ->
SpecMap.add
spec.Specs.pre
(Paths.PathSet.from_renamed_list spec.Specs.posts, spec.Specs.visited) map)
SpecMap.empty old_specs) in
let re_exe_filter old_spec = (* filter out pres which failed re-exe *)
if phase == Specs.RE_EXECUTION &&
not (IList.exists
(fun new_spec -> Specs.Jprop.equal new_spec.Specs.pre old_spec.Specs.pre)
new_specs)
then begin
changed:= true;
L.out "Specs changed: removing pre of spec@\n%a@." (Specs.pp_spec pe_text None) old_spec;
current_specs := SpecMap.remove old_spec.Specs.pre !current_specs end
else () in
let add_spec spec = (* add a new spec by doing union of the posts *)
try
let old_post, old_visited = SpecMap.find spec.Specs.pre !current_specs in
let new_post, new_visited =
Paths.PathSet.union
old_post
(Paths.PathSet.from_renamed_list spec.Specs.posts),
Specs.Visitedset.union old_visited spec.Specs.visited in
if not (Paths.PathSet.equal old_post new_post) then begin
changed := true;
L.out "Specs changed: added new post@\n%a@."
(Propset.pp pe_text (Specs.Jprop.to_prop spec.Specs.pre))
(Paths.PathSet.to_propset new_post);
current_specs :=
SpecMap.add spec.Specs.pre (new_post, new_visited)
(SpecMap.remove spec.Specs.pre !current_specs) end
with Not_found ->
changed := true;
L.out "Specs changed: added new pre@\n%a@." (Specs.Jprop.pp_short pe_text) spec.Specs.pre;
current_specs :=
SpecMap.add
spec.Specs.pre
((Paths.PathSet.from_renamed_list spec.Specs.posts), spec.Specs.visited)
!current_specs in
let res = ref [] in
let convert pre (post_set, visited) =
res :=
Specs.spec_normalize
{ Specs.pre = pre;
Specs.posts = Paths.PathSet.elements post_set;
Specs.visited = visited }:: !res in
IList.iter re_exe_filter old_specs; (* filter out pres which failed re-exe *)
IList.iter add_spec new_specs; (* add new specs *)
SpecMap.iter convert !current_specs;
!res,!changed
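(* Illustrative sketch, not part of this change: the update above maintains a
   map from precondition to accumulated postconditions, unions in the posts of
   every new spec and records whether anything changed. A standalone model of
   that merge, with hypothetical string preconditions and integer
   postconditions: *)
module PreMap = Map.Make (String)
module PostSet = Set.Make (struct type t = int let compare = compare end)

let merge_specs (old_map : PostSet.t PreMap.t) (new_specs : (string * PostSet.t) list) =
  List.fold_left
    (fun (map, changed) (pre, posts) ->
       try
         let old_posts = PreMap.find pre map in
         let merged = PostSet.union old_posts posts in
         if PostSet.equal merged old_posts
         then (map, changed)                      (* nothing new for this pre *)
         else (PreMap.add pre merged map, true)   (* new postconditions added *)
       with Not_found ->
         (PreMap.add pre posts map, true))        (* entirely new precondition *)
    (old_map, false) new_specs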
(** update a summary after analysing a procedure *)
let update_summary prev_summary specs phase proc_name elapsed res =
let normal_specs = IList.map Specs.spec_normalize specs in
let new_specs, changed = Fork.update_specs proc_name phase normal_specs in
let new_specs, changed = update_specs proc_name phase normal_specs in
let timestamp = max 1 (prev_summary.Specs.timestamp + if changed then 1 else 0) in
let stats_time = prev_summary.Specs.stats.Specs.stats_time +. elapsed in
let symops = prev_summary.Specs.stats.Specs.symops + SymOp.get_total () in
@ -1126,10 +1191,40 @@ let analyze_proc exe_env (proc_name: Procname.t) : Specs.summary =
report_runtime_exceptions tenv proc_desc updated_summary;
updated_summary
(** Perform the transition from [FOOTPRINT] to [RE_EXECUTION] in spec table *)
let transition_footprint_re_exe proc_name joined_pres =
L.out "Transition %a from footprint to re-exe@." Procname.pp proc_name;
let summary = Specs.get_summary_unsafe "transition_footprint_re_exe" proc_name in
let summary' =
if !Config.only_footprint then
{ summary with
Specs.phase = Specs.RE_EXECUTION;
}
else
let specs =
IList.map
(fun jp ->
Specs.spec_normalize
{ Specs.pre = jp;
posts = [];
visited = Specs.Visitedset.empty })
joined_pres in
let payload =
{ summary.Specs.payload with
Specs.preposts = Some specs; } in
let dependency_map =
Specs.re_initialize_dependency_map summary.Specs.dependency_map in
{ summary with
Specs.timestamp = 0;
phase = Specs.RE_EXECUTION;
dependency_map;
payload;
} in
Specs.add_summary proc_name summary'
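(* Illustrative sketch, not part of this change: on the default path (without
   only_footprint) the transition keeps only the joined preconditions, drops
   all postconditions, resets the timestamp and switches the phase to
   re-execution. A standalone model with hypothetical types: *)
type phase = Footprint | Re_execution

type ('pre, 'post) summary_model =
  { proc_name : string;
    phase : phase;
    timestamp : int;
    specs : ('pre * 'post list) list;  (* precondition paired with its postconditions *)
  }

let transition_to_re_exe (summary : ('pre, 'post) summary_model) (joined_pres : 'pre list) =
  { summary with
    phase = Re_execution;
    timestamp = 0;
    specs = List.map (fun pre -> (pre, [])) joined_pres }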
(** Perform phase transition from [FOOTPRINT] to [RE_EXECUTION] for
the procedures enabled after the analysis of [proc_name] *)
let perform_transition exe_env cg proc_name =
let proc_names = Fork.should_perform_transition cg proc_name in
let perform_transition exe_env proc_name =
let transition pname =
let tenv = Exe_env.get_tenv exe_env pname in
let joined_pres = (* disable exceptions for leaks and protect against any other errors *)
@ -1157,27 +1252,9 @@ let perform_transition exe_env cg proc_name =
let err_str = "exception raised " ^ (Localise.to_string err_name) in
L.err "Error: %s %a@." err_str pp_ml_loc_opt ml_loc_opt;
[] in
Fork.transition_footprint_re_exe pname joined_pres in
IList.iter transition proc_names
(** Process the result of the analysis of [proc_name]: update the
returned summary and add it to the spec table. Executed in the
parent process as soon as a child process returns a result. *)
let process_result (exe_env: Exe_env.t) (proc_name, calls) (_summ: Specs.summary) : unit =
if !Config.trace_anal then L.err "===process_result@.";
Ident.NameGenerator.reset (); (* for consistency with multi-core mode *)
let summ =
{ _summ with
Specs.stats = { _summ.Specs.stats with Specs.stats_calls = calls }} in
Specs.add_summary proc_name summ;
let call_graph = Exe_env.get_cg exe_env in
perform_transition exe_env call_graph proc_name;
if !Config.only_footprint || summ.Specs.phase != Specs.FOOTPRINT then
(try Specs.store_summary proc_name summ with
Sys_error s ->
L.err "@.### System Error while writing summary of procedure %a to disk: %s@." Procname.pp proc_name s);
let procs_done = Fork.procs_become_done call_graph proc_name in
Fork.post_process_procs exe_env procs_done
transition_footprint_re_exe pname joined_pres in
if Specs.get_phase proc_name == Specs.FOOTPRINT
then transition proc_name
let analyze_proc_for_ondemand exe_env proc_name =
let saved_footprint = !Config.footprint in
@ -1186,13 +1263,13 @@ let analyze_proc_for_ondemand exe_env proc_name =
Specs.add_summary proc_name summaryfp;
let cg = Cg.create () in
Cg.add_defined_node cg proc_name;
perform_transition exe_env cg proc_name;
perform_transition exe_env proc_name;
Config.footprint := false;
let summaryre = analyze_proc exe_env proc_name in
Specs.add_summary proc_name summaryre;
Config.footprint := saved_footprint
let interprocedural_algorithm_ondemand exe_env : unit =
let interprocedural_algorithm exe_env : unit =
let call_graph = Exe_env.get_cg exe_env in
let filter_initial proc_name =
let summary = Specs.get_summary_unsafe "main_algorithm" proc_name in
@ -1210,7 +1287,7 @@ let interprocedural_algorithm_ondemand exe_env : unit =
else true in
if
reactive_changed && (* in reactive mode, only analyze changed procedures *)
Ondemand.procedure_should_be_analyzed pdesc proc_name
Ondemand.procedure_should_be_analyzed proc_name
then
Some pdesc
else
@ -1227,7 +1304,6 @@ let interprocedural_algorithm_ondemand exe_env : unit =
() in
IList.iter process_one_proc procs_to_analyze
(** Perform the analysis of an exe_env *)
let do_analysis exe_env =
if !Config.trace_anal then L.err "do_analysis@.";
@ -1256,7 +1332,6 @@ let do_analysis exe_env =
(fun ((pn, _) as x) ->
let should_init () =
Config.analyze_models ||
not !Config.ondemand_enabled ||
Specs.get_summary pn = None in
if should_init ()
then init_proc x)
@ -1270,15 +1345,10 @@ let do_analysis exe_env =
analyze_proc_for_ondemand exe_env proc_name in
{ Ondemand.analyze_ondemand; get_proc_desc; } in
if !Config.ondemand_enabled
then
begin
Ondemand.set_callbacks callbacks;
interprocedural_algorithm_ondemand exe_env;
Ondemand.unset_callbacks ()
end
else
Fork.interprocedural_algorithm exe_env analyze_proc process_result
Ondemand.set_callbacks callbacks;
interprocedural_algorithm exe_env;
Ondemand.unset_callbacks ()
let visited_and_total_nodes cfg =
let all_nodes =
@ -1296,9 +1366,8 @@ let visited_and_total_nodes cfg =
(** Print the stats for the given cfg; consider every defined proc unless a proc with the same name
was defined in another module, and was the one which was analyzed *)
let print_stats_cfg proc_shadowed proc_is_active cfg =
let print_stats_cfg proc_shadowed cfg =
let err_table = Errlog.create_err_table () in
let active_procs = IList.filter proc_is_active (Cfg.get_defined_procs cfg) in
let nvisited, ntotal = visited_and_total_nodes cfg in
let node_filter n =
let node_procname = Cfg.Procdesc.get_proc_name (Cfg.Node.get_proc_desc n) in
@ -1345,8 +1414,14 @@ let print_stats_cfg proc_shadowed proc_is_active cfg =
(* F.fprintf fmt "VISITED: %a@\n" (pp_seq pp_node) nodes_visited;
F.fprintf fmt "TOTAL: %a@\n" (pp_seq pp_node) nodes_total; *)
F.fprintf fmt "@\n++++++++++++++++++++++++++++++++++++++++++++++++++@\n";
F.fprintf fmt "+ FILE: %s LOC: %n VISITED: %d/%d SYMOPS: %d@\n" (DB.source_file_to_string !DB.current_source) !Config.nLOC (IList.length nodes_visited) (IList.length nodes_total) !tot_symops;
F.fprintf fmt "+ num_procs: %d (%d ok, %d timeouts, %d errors, %d warnings, %d infos)@\n" !num_proc num_ok_proc !num_timeout num_errors num_warnings num_infos;
F.fprintf fmt "+ FILE: %s LOC: %n VISITED: %d/%d SYMOPS: %d@\n"
(DB.source_file_to_string !DB.current_source)
!Config.nLOC
(IList.length nodes_visited)
(IList.length nodes_total)
!tot_symops;
F.fprintf fmt "+ num_procs: %d (%d ok, %d timeouts, %d errors, %d warnings, %d infos)@\n"
!num_proc num_ok_proc !num_timeout num_errors num_warnings num_infos;
F.fprintf fmt "+ detail procs:@\n";
F.fprintf fmt "+ - No Errors and No Specs: %d@\n" !num_nospec_noerror_proc;
F.fprintf fmt "+ - Some Errors and No Specs: %d@\n" !num_nospec_error_proc;
@ -1367,20 +1442,18 @@ let print_stats_cfg proc_shadowed proc_is_active cfg =
print_file_stats fmt ();
close_out outc
with Sys_error _ -> () in
IList.iter compute_stats_proc active_procs;
IList.iter compute_stats_proc (Cfg.get_defined_procs cfg);
L.out "%a" print_file_stats ();
save_file_stats ()
let _print_stats exe_env =
let proc_is_active proc_desc =
Exe_env.proc_is_active exe_env (Cfg.Procdesc.get_proc_name proc_desc) in
Exe_env.iter_files (fun fname cfg ->
let proc_shadowed proc_desc =
(** return true if a proc with the same name in another module was analyzed instead *)
let proc_name = Cfg.Procdesc.get_proc_name proc_desc in
Exe_env.get_source exe_env proc_name <> fname in
print_stats_cfg proc_shadowed proc_is_active cfg) exe_env
(** Print the stats for all the files in the exe_env *)
let print_stats exe_env =
if !Config.developer_mode then _print_stats exe_env
if !Config.developer_mode then
Exe_env.iter_files
(fun fname cfg ->
let proc_shadowed proc_desc =
(** return true if a proc with the same name in another module was analyzed instead *)
let proc_name = Cfg.Procdesc.get_proc_name proc_desc in
Exe_env.get_source exe_env proc_name <> fname in
print_stats_cfg proc_shadowed cfg)
exe_env

@ -13,12 +13,6 @@ module L = Logging
module F = Format
let trace () = Config.from_env_variable "INFER_TRACE_ONDEMAND"
let one_cluster_per_procedure () = false
let () =
Config.ondemand_enabled := true
let across_files () = true
(** Name of the ondemand file *)
let ondemand_file () = Config.get_env_variable "INFER_ONDEMAND_FILE"
@ -59,7 +53,7 @@ let unset_callbacks () =
let nesting = ref 0
let procedure_should_be_analyzed curr_pdesc proc_name =
let procedure_should_be_analyzed proc_name =
match AttributesTable.load_attributes proc_name with
| Some proc_attributes ->
let currently_analyzed =
@ -70,18 +64,10 @@ let procedure_should_be_analyzed curr_pdesc proc_name =
Specs.get_timestamp summary > 0
| None ->
false in
(* The procedure to be analyzed is in the same file as the current one. *)
let same_file proc_attributes =
(Cfg.Procdesc.get_loc curr_pdesc).Location.file
=
proc_attributes.ProcAttributes.loc.Location.file in
!Config.ondemand_enabled &&
proc_attributes.ProcAttributes.is_defined && (* we have the implementation *)
not currently_analyzed && (* avoid infinite loops *)
not already_analyzed && (* avoid re-analysis of the same procedure *)
(across_files () || (* whether to push the analysis into other files *)
same_file proc_attributes)
not already_analyzed (* avoid re-analysis of the same procedure *)
| None ->
false
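(* Illustrative sketch, not part of this change: with the same-file restriction
   and the on-demand flag gone, the decision reduces to three checks. The
   argument names are hypothetical; a missing summary is treated here as a
   timestamp of 0. *)
let should_analyze ~is_defined ~currently_analyzed ~summary_timestamp =
  is_defined                    (* we have the implementation *)
  && not currently_analyzed     (* avoid infinite loops on recursive calls *)
  && summary_timestamp = 0      (* avoid re-analysis of the same procedure *)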
@ -199,7 +185,7 @@ let do_analysis ~propagate_exceptions curr_pdesc callee_pname =
match !callbacks_ref with
| Some callbacks
when procedure_should_be_analyzed curr_pdesc callee_pname ->
when procedure_should_be_analyzed callee_pname ->
begin
match callbacks.get_proc_desc callee_pname with
| Some _ -> really_do_analysis callbacks.analyze_ondemand

@ -27,10 +27,8 @@ type callbacks =
triggered during the analysis of curr_pname. *)
val do_analysis : propagate_exceptions:bool -> Cfg.Procdesc.t -> Procname.t -> unit
val one_cluster_per_procedure : unit -> bool
(** Check if the procedure called by the current pdesc needs to be analyzed. *)
val procedure_should_be_analyzed : Cfg.Procdesc.t -> Procname.t -> bool
(** Check if the procedure called needs to be analyzed. *)
val procedure_should_be_analyzed : Procname.t -> bool
(** Set the callbacks used to perform on-demand analysis. *)
val set_callbacks : callbacks -> unit

@ -72,16 +72,16 @@ end = struct
(is_visited node) false fmt (Cfg.Node.get_id node)) preds;
F.fprintf fmt "<br>SUCCS: @\n";
IList.iter (fun node -> Io_infer.Html.pp_node_link [".."] ""
(IList.map Cfg.Node.get_id (Cfg.Node.get_preds node))
(IList.map Cfg.Node.get_id (Cfg.Node.get_succs node))
(IList.map Cfg.Node.get_id (Cfg.Node.get_exn node))
(is_visited node) false fmt (Cfg.Node.get_id node)) succs;
(IList.map Cfg.Node.get_id (Cfg.Node.get_preds node))
(IList.map Cfg.Node.get_id (Cfg.Node.get_succs node))
(IList.map Cfg.Node.get_id (Cfg.Node.get_exn node))
(is_visited node) false fmt (Cfg.Node.get_id node)) succs;
F.fprintf fmt "<br>EXN: @\n";
IList.iter (fun node -> Io_infer.Html.pp_node_link [".."] ""
(IList.map Cfg.Node.get_id (Cfg.Node.get_preds node))
(IList.map Cfg.Node.get_id (Cfg.Node.get_succs node))
(IList.map Cfg.Node.get_id (Cfg.Node.get_exn node))
(is_visited node) false fmt (Cfg.Node.get_id node)) exn;
(IList.map Cfg.Node.get_id (Cfg.Node.get_preds node))
(IList.map Cfg.Node.get_id (Cfg.Node.get_succs node))
(IList.map Cfg.Node.get_id (Cfg.Node.get_exn node))
(is_visited node) false fmt (Cfg.Node.get_id node)) exn;
F.fprintf fmt "<br>@\n";
F.pp_print_flush fmt ();
true
@ -373,7 +373,7 @@ end = struct
end
(** Create filename.c.html with line numbers and links to nodes *)
let c_file_write_html proc_is_active linereader fname cfg =
let c_file_write_html linereader fname cfg =
let proof_cover = ref Specs.Visitedset.empty in
let tbl = Hashtbl.create 11 in
let process_node n =
@ -387,8 +387,7 @@ let c_file_write_html proc_is_active linereader fname cfg =
let global_err_log = Errlog.empty () in
let do_proc proc_name proc_desc = (* add the err_log of this proc to [global_err_log] *)
let proc_loc = (Cfg.Procdesc.get_loc proc_desc) in
if proc_is_active proc_name &&
Cfg.Procdesc.is_defined proc_desc &&
if Cfg.Procdesc.is_defined proc_desc &&
(DB.source_file_equal proc_loc.Location.file !DB.current_source) then
begin
IList.iter process_node (Cfg.Procdesc.get_nodes proc_desc);
@ -444,5 +443,4 @@ let c_file_write_html proc_is_active linereader fname cfg =
Io_infer.Html.close (fd, fmt))
let c_files_write_html linereader exe_env =
let proc_is_active = Exe_env.proc_is_active exe_env in
if !Config.write_html then Exe_env.iter_files (c_file_write_html proc_is_active linereader) exe_env
if !Config.write_html then Exe_env.iter_files (c_file_write_html linereader) exe_env

@ -421,10 +421,8 @@ let is_defined cfg pname =
| Some pdesc -> Cfg.Procdesc.is_defined pdesc
let call_should_be_skipped callee_pname summary =
(* skip all procedures in intra-procedural mode *)
!Config.intraprocedural
(* check skip flag *)
|| Specs.get_flag callee_pname proc_flag_skip <> None
Specs.get_flag callee_pname proc_flag_skip <> None
(* skip abstract methods *)
|| summary.Specs.attributes.ProcAttributes.is_abstract
(* treat calls with no specs as skip functions in angelic mode *)
@ -1091,8 +1089,7 @@ let rec sym_exec cfg tenv pdesc _instr (_prop: Prop.normal Prop.t) path
let resolved_pnames =
resolve_virtual_pname tenv norm_prop url_handled_args callee_pname call_flags in
let exec_one_pname pname =
if !Config.ondemand_enabled then
Ondemand.do_analysis ~propagate_exceptions:true pdesc pname;
Ondemand.do_analysis ~propagate_exceptions:true pdesc pname;
let exec_skip_call ret_type =
skip_call norm_prop path pname loc ret_ids (Some ret_type) url_handled_args in
match Specs.get_summary pname with
@ -1117,8 +1114,7 @@ let rec sym_exec cfg tenv pdesc _instr (_prop: Prop.normal Prop.t) path
match resolve_virtual_pname tenv prop_r n_actual_params fn call_flags with
| resolved_pname :: _ -> resolved_pname
| [] -> fn in
if !Config.ondemand_enabled then
Ondemand.do_analysis ~propagate_exceptions:true pdesc resolved_pname;
Ondemand.do_analysis ~propagate_exceptions:true pdesc resolved_pname;
let callee_pdesc_opt = Cfg.Procdesc.find_from_name cfg resolved_pname in
let ret_typ_opt = Option.map Cfg.Procdesc.get_ret_type callee_pdesc_opt in
let sentinel_result =

@ -320,8 +320,7 @@ let callback_performance_checker { Callbacks.proc_desc; proc_name; get_proc_desc
| None -> ()
| Some pd -> check_one_procedure tenv pn pd in
{ Ondemand.analyze_ondemand; get_proc_desc; } in
if !Config.ondemand_enabled
|| Ondemand.procedure_should_be_analyzed proc_desc proc_name
if Ondemand.procedure_should_be_analyzed proc_name
then
begin
Ondemand.set_callbacks callbacks;

@ -385,7 +385,7 @@ module Main =
Build(EmptyExtension)
(** Eradicate checker for Java @Nullable annotations. *)
let callback_eradicate ({ Callbacks.get_proc_desc; idenv; proc_desc; proc_name } as callback_args) =
let callback_eradicate ({ Callbacks.get_proc_desc; idenv; proc_name } as callback_args) =
let checks =
{
TypeCheck.eradicate = true;
@ -405,8 +405,7 @@ let callback_eradicate ({ Callbacks.get_proc_desc; idenv; proc_desc; proc_name }
proc_desc = pdesc; } in
{ Ondemand.analyze_ondemand; get_proc_desc; } in
if not !Config.ondemand_enabled ||
Ondemand.procedure_should_be_analyzed proc_desc proc_name
if Ondemand.procedure_should_be_analyzed proc_name
then
begin
Ondemand.set_callbacks callbacks;

@ -375,7 +375,6 @@ let check_return_annotation
Models.Inference.proc_mark_return_nullable curr_pname;
if return_not_nullable &&
!Config.ondemand_enabled &&
activate_propagate_return_nullable
then
spec_make_return_nullable curr_pname;

@ -556,8 +556,7 @@ let typecheck_instr ext calls_this checks (node: Cfg.Node.t) idenv get_proc_desc
typestate (* skip other builtins *)
| Sil.Call (ret_ids, Sil.Const (Sil.Cfun callee_pname), _etl, loc, cflags)
when Specs.proc_resolve_attributes (* AttributesTable.load_attributes *) callee_pname <> None ->
if !Config.ondemand_enabled then
Ondemand.do_analysis ~propagate_exceptions:true curr_pdesc callee_pname;
Ondemand.do_analysis ~propagate_exceptions:true curr_pdesc callee_pname;
let callee_attributes =
match Specs.proc_resolve_attributes (* AttributesTable.load_attributes *) callee_pname with
| Some proc_attributes -> proc_attributes
