[starvation] whole program analysis

Summary:
First step towards a global analysis.  A new command line flag activates the step in `Driver`.

The whole-program analysis is a simple quadratic (and therefore as-yet inefficient) iteration over all domain elements.  However, it is restricted to those elements that are explicitly scheduled to run.

Reviewed By: skcho

Differential Revision: D17787441

fbshipit-source-id: 9fecd766c
master
Nikos Gorogiannis 5 years ago committed by Facebook Github Bot
parent bd1b55ef51
commit 08df37ef76

@ -1751,6 +1751,10 @@ INTERNAL OPTIONS
Deactivates: During starvation analysis, report strict mode
violations (Android only) (Conversely: --starvation-strict-mode)
--starvation-whole-program
Activates: Run whole-program starvation analysis (Conversely:
--no-starvation-whole-program)
--stats-report file
Write a report of the analysis results to a file

@ -2213,6 +2213,11 @@ and starvation_skip_analysis =
"Specify combinations of class/method list that should be skipped during starvation analysis"
and starvation_whole_program =
CLOpt.mk_bool ~long:"starvation-whole-program" ~default:false
"Run whole-program starvation analysis"
and spec_abs_level =
CLOpt.mk_int ~deprecated:["spec_abs_level"] ~long:"spec-abs-level" ~default:1 ~meta:"int"
{|Set the level of abstracting the postconditions of discovered specs:
@ -3199,6 +3204,8 @@ and starvation_skip_analysis = !starvation_skip_analysis
and starvation_strict_mode = !starvation_strict_mode
and starvation_whole_program = !starvation_whole_program
and stats_report = !stats_report
and subtype_multirange = !subtype_multirange

@ -672,6 +672,8 @@ val starvation_skip_analysis : Yojson.Basic.t
val starvation_strict_mode : bool
val starvation_whole_program : bool
val stats_report : string option
val subtype_multirange : bool

@ -123,13 +123,13 @@ module TransferFunctions (CFG : ProcCfg.S) = struct
let procname = Summary.get_proc_name summary in
let is_java = Typ.Procname.is_java procname in
let do_lock locks loc astate =
List.filter_map ~f:get_lock_path locks |> Domain.acquire tenv astate ~procname ~loc
List.filter_map ~f:get_lock_path locks |> Domain.acquire ~tenv astate ~procname ~loc
in
let do_unlock locks astate = List.filter_map ~f:get_lock_path locks |> Domain.release astate in
let do_call callee loc astate =
let callsite = CallSite.make callee loc in
Payload.read ~caller_summary:summary ~callee_pname:callee
|> Option.fold ~init:astate ~f:(Domain.integrate_summary tenv callsite)
|> Option.fold ~init:astate ~f:(Domain.integrate_summary ~tenv callsite)
in
match instr with
| Assign _ | Call (_, Indirect _, _, _, _) ->
@ -210,7 +210,7 @@ let analyze_procedure {Callbacks.exe_env; summary} =
| _ ->
FormalMap.get_formal_base 0 formals |> Option.map ~f:(fun base -> (base, []))
in
Domain.acquire tenv astate ~procname ~loc (Option.to_list lock)
Domain.acquire ~tenv astate ~procname ~loc (Option.to_list lock)
else astate
in
let set_thread_status_by_annotation (astate : Domain.t) =
@ -474,6 +474,7 @@ let report_on_parallel_composition ~should_report_starvation tenv pdesc pair loc
| LockAcquire other_lock
when CriticalPair.may_deadlock pair other_pair
&& should_report_deadlock_on_current_proc pair other_pair ->
let open Domain in
let error_message =
Format.asprintf
"Potential deadlock. %a (Trace 1) and %a (Trace 2) acquire locks %a and %a in reverse \
@ -531,7 +532,7 @@ let report_on_pair ((tenv, summary) as env) (pair : Domain.CriticalPair.t) repor
let loc = CriticalPair.get_earliest_lock_or_call_loc ~procname:pname pair in
let ltr = CriticalPair.make_trace ~header:"In method " pname pair in
ReportMap.add_deadlock tenv pdesc loc ltr error_message report_map
| LockAcquire lock ->
| LockAcquire lock when not Config.starvation_whole_program ->
Lock.owner_class lock
|> Option.value_map ~default:report_map ~f:(fun other_class ->
(* get the class of the root variable of the lock in the lock acquisition
@ -549,14 +550,93 @@ let report_on_pair ((tenv, summary) as env) (pair : Domain.CriticalPair.t) repor
let reporting {Callbacks.procedures} =
let report_on_summary env report_map (summary : Domain.summary) =
Domain.CriticalPairs.fold (report_on_pair env) summary.critical_pairs report_map
in
let report_procedure report_map ((_, summary) as env) =
let proc_desc = Summary.get_proc_desc summary in
if should_report proc_desc then
Payload.read_toplevel_procedure (Procdesc.get_proc_name proc_desc)
|> Option.fold ~init:report_map ~f:(report_on_summary env)
else report_map
if Config.starvation_whole_program then ()
else
let report_on_summary env report_map (summary : Domain.summary) =
Domain.CriticalPairs.fold (report_on_pair env) summary.critical_pairs report_map
in
let report_procedure report_map ((_, summary) as env) =
let proc_desc = Summary.get_proc_desc summary in
if should_report proc_desc then
Payload.read_toplevel_procedure (Procdesc.get_proc_name proc_desc)
|> Option.fold ~init:report_map ~f:(report_on_summary env)
else report_map
in
List.fold procedures ~init:ReportMap.empty ~f:report_procedure |> ReportMap.store
(* given a scheduled-work item, read the summary of the scheduled method from disk
and adapt its contents to the thread it was scheduled to *)
let get_summary_of_scheduled_work (work_item : Domain.ScheduledWorkItem.t) =
let astate = {Domain.bottom with thread= work_item.thread} in
let callsite = CallSite.make work_item.procname work_item.loc in
Summary.OnDisk.get work_item.procname
|> Option.bind ~f:(fun (summary : Summary.t) -> Payloads.starvation summary.payloads)
|> Option.map ~f:(Domain.integrate_summary ?tenv:None callsite astate)
|> Option.map ~f:(fun (astate : Domain.t) -> astate.critical_pairs)
(* given a summary, do [f work critical_pairs] for each [work] item scheduled in the summary,
where [critical_pairs] are those of the method scheduled, adapted to the thread it's scheduled for *)
let iter_summary ~f (summary : Summary.t) =
let caller = Summary.get_proc_name summary in
let open Domain in
Payloads.starvation summary.payloads
|> Option.iter ~f:(fun ({scheduled_work} : summary) ->
ScheduledWorkDomain.iter
(fun work -> get_summary_of_scheduled_work work |> Option.iter ~f:(f caller))
scheduled_work )
module WorkHashSet = struct
module T = struct
type t = Typ.Procname.t * Domain.CriticalPair.t
(* [compare] for critical pairs ignores various fields, so a generated equality would be
inconsistent with the polymorphic hash function (equal keys could hash differently).
We use [phys_equal] instead and rely on the clients to not add duplicate items. *)
let equal = phys_equal
let hash = Hashtbl.hash
end
include Caml.Hashtbl.Make (T)
let add_pairs work_set caller pairs =
let open Domain in
CriticalPairs.iter (fun pair -> replace work_set (caller, pair) ()) pairs
end
let report exe_env work_set =
let open Domain in
let wrap_report (procname, (pair : CriticalPair.t)) () init =
Summary.OnDisk.get procname
|> Option.fold ~init ~f:(fun acc summary ->
let pdesc = Summary.get_proc_desc summary in
let tenv = Exe_env.get_tenv exe_env procname in
let acc = report_on_pair (tenv, summary) pair acc in
match pair.elem.event with
| LockAcquire lock ->
let should_report_starvation =
CriticalPair.is_uithread pair && not (Typ.Procname.is_constructor procname)
in
WorkHashSet.fold
(fun (other_procname, (other_pair : CriticalPair.t)) () acc ->
report_on_parallel_composition ~should_report_starvation tenv pdesc pair lock
other_procname other_pair acc )
work_set acc
| _ ->
acc )
in
List.fold procedures ~init:ReportMap.empty ~f:report_procedure |> ReportMap.store
WorkHashSet.fold wrap_report work_set ReportMap.empty |> ReportMap.store
let whole_program_analysis () =
L.progress "Starvation whole program analysis starts.@." ;
let work_set = WorkHashSet.create 1 in
let exe_env = Exe_env.mk () in
L.progress "Processing on-disk summaries...@." ;
SpecsFiles.iter ~f:(iter_summary ~f:(WorkHashSet.add_pairs work_set)) ;
L.progress "Loaded %d pairs@." (WorkHashSet.length work_set) ;
L.progress "Reporting on processed summaries...@." ;
report exe_env work_set

@ -10,3 +10,5 @@ open! IStd
val analyze_procedure : Callbacks.proc_callback_t
val reporting : Callbacks.cluster_callback_t
val whole_program_analysis : unit -> unit

@ -74,7 +74,8 @@ module ThreadDomain = struct
(* callee pair is UI / BG / Any and caller has abstracted away info so use callee's knowledge *)
Some pair_thread
| UIThread, BGThread | BGThread, UIThread ->
(* annotations or assertions are incorrectly used in code, just drop the callee pair *)
(* annotations or assertions are incorrectly used in code, or callee is path-sensitive on
thread-identity, just drop the callee pair *)
None
| _, _ ->
(* caller is UI or BG and callee does not disagree, so use that *)
@ -413,19 +414,19 @@ let is_recursive_lock event tenv =
- [event] is not a lock event, or,
- we do not hold the lock, or,
- the lock is not recursive. *)
let should_skip tenv_opt event lock_state =
Option.exists tenv_opt ~f:(fun tenv ->
let should_skip ?tenv event lock_state =
Option.exists tenv ~f:(fun tenv ->
LockState.is_lock_taken event lock_state && is_recursive_lock event tenv )
module CriticalPairs = struct
include CriticalPair.FiniteSet
let with_callsite astate tenv lock_state call_site thread =
let with_callsite astate ?tenv lock_state call_site thread =
let existing_acquisitions = LockState.get_acquisitions lock_state in
fold
(fun ({elem= {event}} as critical_pair : CriticalPair.t) acc ->
if should_skip (Some tenv) event lock_state then acc
if should_skip ?tenv event lock_state then acc
else
CriticalPair.integrate_summary_opt existing_acquisitions call_site thread critical_pair
|> Option.fold ~init:acc ~f:(fun acc new_pair -> add new_pair acc) )
@ -559,20 +560,20 @@ let leq ~lhs ~rhs =
&& ScheduledWorkDomain.leq ~lhs:lhs.scheduled_work ~rhs:rhs.scheduled_work
let add_critical_pair tenv_opt lock_state event thread ~loc acc =
if should_skip tenv_opt event lock_state then acc
let add_critical_pair ?tenv lock_state event thread ~loc acc =
if should_skip ?tenv event lock_state then acc
else
let acquisitions = LockState.get_acquisitions lock_state in
let critical_pair = CriticalPair.make ~loc acquisitions event thread in
CriticalPairs.add critical_pair acc
let acquire tenv ({lock_state; critical_pairs} as astate) ~procname ~loc locks =
let acquire ?tenv ({lock_state; critical_pairs} as astate) ~procname ~loc locks =
{ astate with
critical_pairs=
List.fold locks ~init:critical_pairs ~f:(fun acc lock ->
let event = Event.make_acquire lock in
add_critical_pair (Some tenv) lock_state event astate.thread ~loc acc )
add_critical_pair ?tenv lock_state event astate.thread ~loc acc )
; lock_state=
List.fold locks ~init:lock_state ~f:(fun acc lock -> LockState.acquire ~procname ~loc lock acc)
}
@ -581,7 +582,7 @@ let acquire tenv ({lock_state; critical_pairs} as astate) ~procname ~loc locks =
let make_call_with_event new_event ~loc astate =
{ astate with
critical_pairs=
add_critical_pair None astate.lock_state new_event astate.thread ~loc astate.critical_pairs }
add_critical_pair astate.lock_state new_event astate.thread ~loc astate.critical_pairs }
let blocking_call ~callee sev ~loc astate =
@ -601,7 +602,7 @@ let release ({lock_state} as astate) locks =
let add_guard ~acquire_now ~procname ~loc tenv astate guard lock =
let astate = {astate with guard_map= GuardToLockMap.add_guard ~guard ~lock astate.guard_map} in
if acquire_now then acquire tenv astate ~procname ~loc [lock] else astate
if acquire_now then acquire ~tenv astate ~procname ~loc [lock] else astate
let remove_guard astate guard =
@ -621,7 +622,7 @@ let unlock_guard astate guard =
let lock_guard ~procname ~loc tenv astate guard =
GuardToLockMap.find_opt guard astate.guard_map
|> Option.value_map ~default:astate ~f:(fun lock_opt ->
FlatLock.get lock_opt |> Option.to_list |> acquire tenv astate ~procname ~loc )
FlatLock.get lock_opt |> Option.to_list |> acquire ~tenv astate ~procname ~loc )
let filter_blocking_calls ({critical_pairs} as astate) =
@ -649,9 +650,10 @@ let pp_summary fmt (summary : summary) =
summary.scheduled_work
let integrate_summary tenv callsite (astate : t) (summary : summary) =
let integrate_summary ?tenv callsite (astate : t) (summary : summary) =
let critical_pairs' =
CriticalPairs.with_callsite summary.critical_pairs tenv astate.lock_state callsite astate.thread
CriticalPairs.with_callsite summary.critical_pairs ?tenv astate.lock_state callsite
astate.thread
in
{ astate with
critical_pairs= CriticalPairs.join astate.critical_pairs critical_pairs'

@ -48,9 +48,16 @@ end
module LockState : AbstractDomain.WithTop
(** a lock acquisition with location information *)
module Acquisition : sig
type t = private
{lock: Lock.t; loc: Location.t [@compare.ignore]; procname: Typ.Procname.t [@compare.ignore]}
[@@deriving compare]
end
(** A set of lock acquisitions with source locations and procnames. *)
module Acquisitions : sig
type t
include PrettyPrintable.PPSet with type elt = Acquisition.t
val lock_is_held : Lock.t -> t -> bool
(** is the given lock in the set *)
@ -134,7 +141,7 @@ type t =
include AbstractDomain.WithBottom with type t := t
val acquire : Tenv.t -> t -> procname:Typ.Procname.t -> loc:Location.t -> Lock.t list -> t
val acquire : ?tenv:Tenv.t -> t -> procname:Typ.Procname.t -> loc:Location.t -> Lock.t list -> t
(** simultaneously acquire a number of locks, no-op if list is empty *)
val release : t -> Lock.t list -> t
@ -173,7 +180,7 @@ type summary =
val pp_summary : F.formatter -> summary -> unit
val integrate_summary : Tenv.t -> CallSite.t -> t -> summary -> t
val integrate_summary : ?tenv:Tenv.t -> CallSite.t -> t -> summary -> t
val summary_of_astate : t -> summary

@ -414,7 +414,9 @@ let analyze_and_report ?suppress_console_report ~changed_files mode =
RunState.store () ) ;
if should_analyze then
if SourceFiles.is_empty () && Config.capture then error_nothing_to_analyze mode
else execute_analyze ~changed_files ;
else (
execute_analyze ~changed_files ;
if Config.starvation_whole_program then Starvation.whole_program_analysis () ) ;
if should_report && Config.report then report ?suppress_console:suppress_console_report ()

@ -0,0 +1,95 @@
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
import java.util.concurrent.Executor;
// Test fixture for the whole-program starvation analysis: each Runnable posted to an
// annotated executor is analyzed as if it runs on that executor's thread, so deadlock
// reports arise from pairs of work items that take the same locks in opposite orders
// on threads that can run in parallel.
// NOTE(review): expected report locations are recorded in issues.exp; inserting or
// removing lines in this file shifts the reported line numbers — keep them in sync.
class Deadlock {
// executors are injected and annotated as to what thread they schedule to
@ForUiThread private final Executor mUiThreadExecutor = null;
@ForNonUiThread private final Executor mNonUiThreadExecutor = null;
// locks taken in opposite orders by the two work items of postDeadlockBad
Object monitorA, monitorB;
// text-book deadlock between UI and background thread
public void postDeadlockBad() {
mUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
// UI thread: A then B
synchronized (monitorA) {
synchronized (monitorB) {
}
}
}
});
mNonUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
// background thread: B then A — reverse order w.r.t. the item above
synchronized (monitorB) {
synchronized (monitorA) {
}
}
}
});
}
Object monitorC, monitorD;
// non-deadlock as both work items are scheduled on same thread
public void postOnUIThreadOk() {
mUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
synchronized (monitorC) {
synchronized (monitorD) {
}
}
}
});
// same (UI) thread as above: the two items cannot run concurrently, so no report
mUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
synchronized (monitorD) {
synchronized (monitorC) {
}
}
}
});
}
Object monitorE, monitorF;
// deadlock as both work items are scheduled on background threads
public void postOnBGThreadBad() {
mNonUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
synchronized (monitorE) {
synchronized (monitorF) {
}
}
}
});
// distinct background-thread items may interleave, so reverse order is reported
mNonUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
synchronized (monitorF) {
synchronized (monitorE) {
}
}
}
});
}
}

@ -0,0 +1,47 @@
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
import android.os.Binder;
import android.os.RemoteException;
import java.util.concurrent.Executor;
// Test fixture: a Binder transaction (a blocking IPC call) scheduled directly on the
// UI thread should be reported as STARVATION; the same call scheduled on a background
// thread should not be reported.
// NOTE(review): expected report locations are recorded in issues.exp; adding or
// removing lines here shifts them.
class DirectStarvation {
static Binder binder;
// executors are injected and annotated as to what thread they schedule to
@ForUiThread private final Executor mUiThreadExecutor = null;
@ForNonUiThread private final Executor mNonUiThreadExecutor = null;
// call which should not happen on UI thread
private static void doTransact() {
try {
binder.transact(0, null, null, 0);
} catch (RemoteException e) {
// deliberately ignored: only the blocking transact call matters to the analysis
}
}
// starvation via scheduling a transaction on UI thread
public void postBlockingCallToUIThreadBad() {
mUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
doTransact();
}
});
}
// no report: the blocking call runs on a background thread
public void postBlockingCallToNonUIThreadOk() {
mNonUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
doTransact();
}
});
}
}

@ -0,0 +1,8 @@
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
// Marker annotation used by these tests: an executor field annotated with this
// schedules its work items onto a non-UI (background) thread.
@interface ForNonUiThread {}

@ -0,0 +1,8 @@
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
// Marker annotation used by these tests: an executor field annotated with this
// schedules its work items onto the UI thread.
@interface ForUiThread {}

@ -0,0 +1,75 @@
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
import android.os.Binder;
import android.os.RemoteException;
import java.util.concurrent.Executor;
// Test fixture: starvation reported when the UI thread takes a lock that a background
// thread holds while making a blocking Binder transaction (the UI thread can then wait
// on the background thread's blocking call). No report when the threads use different
// locks. NOTE(review): expected report locations are recorded in issues.exp; adding or
// removing lines here shifts them.
class IndirectStarvation {
static Binder binder;
// executors are injected and annotated as to what thread they schedule to
@ForUiThread private final Executor mUiThreadExecutor = null;
@ForNonUiThread private final Executor mNonUiThreadExecutor = null;
// call which should not happen on UI thread
private static void doTransact() {
try {
binder.transact(0, null, null, 0);
} catch (RemoteException e) {
// deliberately ignored: only the blocking transact call matters to the analysis
}
}
Object monitorA;
// starvation via locking on UI thread and doing a transaction under that lock
// in a background thread
public void postBlockingCallToBackgroundThreadAndLockBad() {
mUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
// UI thread contends on monitorA
synchronized (monitorA) {
}
}
});
mNonUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
// background thread blocks inside the same lock the UI thread wants
synchronized (monitorA) {
doTransact();
}
}
});
}
Object monitorB, monitorC;
// no starvation, as lock on UI thread is not used for transaction on background thread
public void postBlockingCallToBackgroundThreadAndUseOtherLockOk() {
mUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
synchronized (monitorB) {
}
}
});
mNonUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
// different lock than the UI thread's, so no blocking dependency
synchronized (monitorC) {
doTransact();
}
}
});
}
}

@ -0,0 +1,12 @@
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Root of the infer test directory tree, relative to this Makefile.
TESTS_DIR = ../../..
# Run only the starvation checker, with the new whole-program step enabled.
INFER_OPTIONS = --starvation-only --starvation-whole-program --debug-exceptions
INFERPRINT_OPTIONS = --issues-tests
# Every Java file in this directory is part of the test.
SOURCES = $(wildcard *.java)
# Shared rules for javac-based integration tests (build, run infer, diff issues.exp).
include $(TESTS_DIR)/javac.make

@ -0,0 +1,59 @@
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
import android.os.Binder;
import android.os.RemoteException;
import java.util.concurrent.Executor;
// Test fixture: like DirectStarvation, but the executors are obtained through the
// modeled Executors class below instead of annotated fields — the analysis must
// recognize the models to attribute the scheduled thread.
// NOTE(review): expected report locations are recorded in issues.exp; adding or
// removing lines here shifts them.
class ModeledExecutors {
static Binder binder;
// blocking Binder transaction; should not run on the UI thread
private static void doTransact() {
try {
binder.transact(0, null, null, 0);
} catch (RemoteException e) {
// deliberately ignored: only the blocking transact call matters to the analysis
}
}
// starvation via scheduling a transaction on UI thread
public void postBlockingCallToUIThreadBad() {
Executors.getForegroundExecutor()
.execute(
new Runnable() {
@Override
public void run() {
doTransact();
}
});
}
// no report: the blocking call runs on a background thread
public void postBlockingCallToNonUIThreadOk() {
Executors.getBackgroundExecutor()
.execute(
new Runnable() {
@Override
public void run() {
doTransact();
}
});
}
}
// modeled executors: the analysis is expected to model these accessors as returning
// a UI-thread / background-thread executor respectively
class Executors {
static Executor uiExecutor;
static Executor getForegroundExecutor() {
return uiExecutor;
}
static Executor bgExecutor;
static Executor getBackgroundExecutor() {
return bgExecutor;
}
}

@ -0,0 +1,7 @@
codetoanalyze/java/starvation-whole-program/Deadlock.java, Deadlock.postDeadlockBad():void, 19, DEADLOCK, no_bucket, ERROR, [[Trace 1] `void Deadlock.postDeadlockBad()`,Method call: `void Deadlock$1.run()`, locks `this.monitorA` in `class Deadlock`, locks `this.monitorB` in `class Deadlock`,[Trace 2] `void Deadlock.postDeadlockBad()`,Method call: `void Deadlock$2.run()`, locks `this.monitorB` in `class Deadlock`, locks `this.monitorA` in `class Deadlock`]
codetoanalyze/java/starvation-whole-program/Deadlock.java, Deadlock.postDeadlockBad():void, 30, DEADLOCK, no_bucket, ERROR, [[Trace 1] `void Deadlock.postDeadlockBad()`,Method call: `void Deadlock$2.run()`, locks `this.monitorB` in `class Deadlock`, locks `this.monitorA` in `class Deadlock`,[Trace 2] `void Deadlock.postDeadlockBad()`,Method call: `void Deadlock$1.run()`, locks `this.monitorA` in `class Deadlock`, locks `this.monitorB` in `class Deadlock`]
codetoanalyze/java/starvation-whole-program/Deadlock.java, Deadlock.postOnBGThreadBad():void, 73, DEADLOCK, no_bucket, ERROR, [[Trace 1] `void Deadlock.postOnBGThreadBad()`,Method call: `void Deadlock$5.run()`, locks `this.monitorE` in `class Deadlock`, locks `this.monitorF` in `class Deadlock`,[Trace 2] `void Deadlock.postOnBGThreadBad()`,Method call: `void Deadlock$6.run()`, locks `this.monitorF` in `class Deadlock`, locks `this.monitorE` in `class Deadlock`]
codetoanalyze/java/starvation-whole-program/Deadlock.java, Deadlock.postOnBGThreadBad():void, 84, DEADLOCK, no_bucket, ERROR, [[Trace 1] `void Deadlock.postOnBGThreadBad()`,Method call: `void Deadlock$6.run()`, locks `this.monitorF` in `class Deadlock`, locks `this.monitorE` in `class Deadlock`,[Trace 2] `void Deadlock.postOnBGThreadBad()`,Method call: `void Deadlock$5.run()`, locks `this.monitorE` in `class Deadlock`, locks `this.monitorF` in `class Deadlock`]
codetoanalyze/java/starvation-whole-program/DirectStarvation.java, DirectStarvation.postBlockingCallToUIThreadBad():void, 29, STARVATION, no_bucket, ERROR, [`void DirectStarvation.postBlockingCallToUIThreadBad()`,Method call: `void DirectStarvation$1.run()`,Method call: `void DirectStarvation.doTransact()`,calls `boolean Binder.transact(int,Parcel,Parcel,int)`]
codetoanalyze/java/starvation-whole-program/IndirectStarvation.java, IndirectStarvation.postBlockingCallToBackgroundThreadAndLockBad():void, 32, STARVATION, no_bucket, ERROR, [[Trace 1] `void IndirectStarvation.postBlockingCallToBackgroundThreadAndLockBad()`,Method call: `void IndirectStarvation$1.run()`, locks `this.monitorA` in `class IndirectStarvation`,[Trace 2] `void IndirectStarvation.postBlockingCallToBackgroundThreadAndLockBad()`,Method call: `void IndirectStarvation$2.run()`, locks `this.monitorA` in `class IndirectStarvation`,Method call: `void IndirectStarvation.doTransact()`,calls `boolean Binder.transact(int,Parcel,Parcel,int)`]
codetoanalyze/java/starvation-whole-program/ModeledExecutors.java, ModeledExecutors.postBlockingCallToUIThreadBad():void, 25, STARVATION, no_bucket, ERROR, [`void ModeledExecutors.postBlockingCallToUIThreadBad()`,Method call: `void ModeledExecutors$1.run()`,Method call: `void ModeledExecutors.doTransact()`,calls `boolean Binder.transact(int,Parcel,Parcel,int)`]

@ -1,153 +0,0 @@
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
import android.os.Binder;
import android.os.RemoteException;
import java.util.concurrent.Executor;
@interface ForUiThread {}
@interface ForNonUiThread {}
// Content of the test file deleted by this commit; its cases are split across the new
// starvation-whole-program fixtures. Methods prefixed FN_ marked known false negatives
// of the pre-whole-program analysis.
class ExecutorRunnable {
static Binder binder;
// executors are injected and annotated as to what thread they schedule to
@ForUiThread private final Executor mUiThreadExecutor = null;
@ForNonUiThread private final Executor mNonUiThreadExecutor = null;
// blocking Binder transaction; should not run on the UI thread
private static void doTransact() {
try {
binder.transact(0, null, null, 0);
} catch (RemoteException e) {
// deliberately ignored: only the blocking transact call matters to the analysis
}
}
// FN_ prefix: expected report that the old analysis missed
public void FN_postBlockingCallToUIThreadBad() {
mUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
doTransact();
}
});
}
public void postBlockingCallToNonUIThreadOk() {
mNonUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
doTransact();
}
});
}
Object monitorA, monitorB;
// helper taking the two locks in A-then-B order
private void lockAB() {
synchronized (monitorA) {
synchronized (monitorB) {
}
}
}
// helper taking the two locks in B-then-A order
private void lockBA() {
synchronized (monitorB) {
synchronized (monitorA) {
}
}
}
// FN_ prefix: deadlock between UI and background work items, missed by the old analysis
public void FN_postDeadlockBad() {
mUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
lockAB();
}
});
mNonUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
lockBA();
}
});
}
// both items on the UI thread: cannot run concurrently, so no report expected
public void postOnUIThreadOk() {
mUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
lockAB();
}
});
mUiThreadExecutor.execute(
new Runnable() {
@Override
public void run() {
lockBA();
}
});
}
// reverse-order locking via executors obtained through the modeled accessors below
public void postDeadlockIndirectBad() {
Executors.getForegroundExecutor()
.execute(
new Runnable() {
@Override
public void run() {
lockAB();
}
});
Executors.getBackgroundExecutor()
.execute(
new Runnable() {
@Override
public void run() {
lockBA();
}
});
}
// both items on the (modeled) UI executor: no report expected
public void postOnUIThreadIndirectOk() {
Executors.getForegroundExecutor()
.execute(
new Runnable() {
@Override
public void run() {
lockAB();
}
});
Executors.getForegroundExecutor()
.execute(
new Runnable() {
@Override
public void run() {
lockBA();
}
});
}
}
// modeled executors from the deleted test file (duplicated in ModeledExecutors.java)
class Executors {
static Executor uiExecutor;
static Executor getForegroundExecutor() {
return uiExecutor;
}
static Executor bgExecutor;
static Executor getBackgroundExecutor() {
return bgExecutor;
}
}
Loading…
Cancel
Save