// Jimple IR (Soot intermediate representation) of Hadoop's shaded AsyncDiskService:
// a container that keeps one ThreadPoolExecutor per disk volume (keyed by root
// path) so asynchronous disk operations on different volumes do not block each
// other.
//
// NOTE(review): the decompiler collapsed every local variable to the single
// name "v", so distinct variables below share one identifier; read the data
// flow positionally (each assignment defines a fresh logical variable), not by
// name. Likewise the bare "label:" lines are branch targets whose original
// distinct names were lost — match them to the nearest goto/if by control flow.
public class oadd.org.apache.hadoop.util.AsyncDiskService extends java.lang.Object
{
// SLF4J logger; assigned once in <clinit> below.
public static final org.slf4j.Logger LOG;
// Pool-sizing constants. Their compile-time constant values were inlined at
// the use site by javac (see the literals 1, 4, 60L in the ThreadPoolExecutor
// constructor call inside <init>), so no initializers survive in this IR.
// Presumably CORE=1, MAX=4, KEEP_ALIVE=60s — TODO confirm against original source.
private static final int CORE_THREADS_PER_VOLUME;
private static final int MAXIMUM_THREADS_PER_VOLUME;
private static final long THREADS_KEEP_ALIVE_SECONDS;
// Thread group ("async disk service") that all pool threads belong to;
// used by the ThreadFactory inner class AsyncDiskService$1.
private final java.lang.ThreadGroup threadGroup;
// Factory (anonymous inner class instance) used by every per-volume pool.
private java.util.concurrent.ThreadFactory threadFactory;
// Raw HashMap (generics erased in IR): volume root String -> ThreadPoolExecutor.
private java.util.HashMap executors;
// Constructor: takes the array of volume root paths and builds one
// ThreadPoolExecutor per root (core=1, max=4, keepAlive=60s, unbounded
// LinkedBlockingQueue, shared threadFactory), enables core-thread timeout,
// and registers each pool in the executors map keyed by its root path.
public void <init>(java.lang.String[])
{
java.util.concurrent.ThreadPoolExecutor v;
java.lang.String[] v;
java.util.HashMap v, v;
java.lang.ThreadGroup v;
java.util.concurrent.LinkedBlockingQueue v;
java.util.concurrent.TimeUnit v;
oadd.org.apache.hadoop.util.AsyncDiskService$1 v;
oadd.org.apache.hadoop.util.AsyncDiskService v;
int v, v;
java.lang.String v;
java.util.concurrent.ThreadFactory v;
v := @this: oadd.org.apache.hadoop.util.AsyncDiskService;
v := @parameter: java.lang.String[];
// Implicit super() call to java.lang.Object.
specialinvoke v.<java.lang.Object: void <init>()>();
// threadGroup = new ThreadGroup("async disk service")
v = new java.lang.ThreadGroup;
specialinvoke v.<java.lang.ThreadGroup: void <init>(java.lang.String)>("async disk service");
v.<oadd.org.apache.hadoop.util.AsyncDiskService: java.lang.ThreadGroup threadGroup> = v;
// executors = new HashMap()
v = new java.util.HashMap;
specialinvoke v.<java.util.HashMap: void <init>()>();
v.<oadd.org.apache.hadoop.util.AsyncDiskService: java.util.HashMap executors> = v;
// threadFactory = new AsyncDiskService$1(this)  -- anonymous ThreadFactory
v = new oadd.org.apache.hadoop.util.AsyncDiskService$1;
specialinvoke v.<oadd.org.apache.hadoop.util.AsyncDiskService$1: void <init>(oadd.org.apache.hadoop.util.AsyncDiskService)>(v);
v.<oadd.org.apache.hadoop.util.AsyncDiskService: java.util.concurrent.ThreadFactory threadFactory> = v;
// Loop index i = 0; iterate over the volumes array.
v = 0;
label:
v = lengthof v;
// Loop exit: index >= volumes.length
if v >= v goto label;
// new ThreadPoolExecutor(1, 4, 60L, SECONDS, new LinkedBlockingQueue(),
//                        threadFactory)
// NOTE(review): 1, 4, 60L are the inlined values of the *_PER_VOLUME /
// KEEP_ALIVE constants declared above.
v = new java.util.concurrent.ThreadPoolExecutor;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
v = new java.util.concurrent.LinkedBlockingQueue;
specialinvoke v.<java.util.concurrent.LinkedBlockingQueue: void <init>()>();
v = v.<oadd.org.apache.hadoop.util.AsyncDiskService: java.util.concurrent.ThreadFactory threadFactory>;
specialinvoke v.<java.util.concurrent.ThreadPoolExecutor: void <init>(int,int,long,java.util.concurrent.TimeUnit,java.util.concurrent.BlockingQueue,java.util.concurrent.ThreadFactory)>(1, 4, 60L, v, v, v);
// allowCoreThreadTimeOut(true) -- booleans are int-encoded (1) in Jimple;
// lets the single core thread die after the keep-alive when idle.
virtualinvoke v.<java.util.concurrent.ThreadPoolExecutor: void allowCoreThreadTimeOut(boolean)>(1);
// executors.put(volumes[i], executor)
v = v.<oadd.org.apache.hadoop.util.AsyncDiskService: java.util.HashMap executors>;
v = v[v];
virtualinvoke v.<java.util.HashMap: java.lang.Object put(java.lang.Object,java.lang.Object)>(v, v);
// i++; continue loop
v = v + 1;
goto label;
label:
return;
}
// Submits a task to the executor of the given volume root. Throws
// RuntimeException if no executor is registered for that root.
// Synchronized: guards the (non-thread-safe) HashMap lookup.
public synchronized void execute(java.lang.String, java.lang.Runnable)
{
oadd.org.apache.hadoop.util.AsyncDiskService v;
java.lang.Object v;
java.util.HashMap v;
java.lang.RuntimeException v;
java.lang.String v, v;
java.lang.Runnable v;
v := @this: oadd.org.apache.hadoop.util.AsyncDiskService;
v := @parameter: java.lang.String;
v := @parameter: java.lang.Runnable;
// executor = executors.get(root)
v = v.<oadd.org.apache.hadoop.util.AsyncDiskService: java.util.HashMap executors>;
v = virtualinvoke v.<java.util.HashMap: java.lang.Object get(java.lang.Object)>(v);
// Found -> jump past the throw to the execute call.
if v != null goto label;
// throw new RuntimeException("Cannot find root <root> for execution of
// task <task>") built via an indy string-concat.
// NOTE(review): the bare "\u" in the recipe below is a decompilation
// artifact — in the original bytecode both placeholders are "\u0001";
// left byte-identical here since the string is runtime behavior.
v = new java.lang.RuntimeException;
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String,java.lang.Runnable)>(v, v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("Cannot find root \u for execution of task \u0001");
specialinvoke v.<java.lang.RuntimeException: void <init>(java.lang.String)>(v);
throw v;
label:
// executor.execute(task)
virtualinvoke v.<java.util.concurrent.ThreadPoolExecutor: void execute(java.lang.Runnable)>(v);
return;
}
// Gracefully shuts down every per-volume executor (previously submitted
// tasks still run; no new tasks accepted). Does not wait — callers pair
// this with awaitTermination below.
public synchronized void shutdown()
{
org.slf4j.Logger v;
java.util.Iterator v;
java.util.Set v;
java.util.HashMap v;
oadd.org.apache.hadoop.util.AsyncDiskService v;
java.lang.Object v, v;
boolean v;
v := @this: oadd.org.apache.hadoop.util.AsyncDiskService;
v = <oadd.org.apache.hadoop.util.AsyncDiskService: org.slf4j.Logger LOG>;
interfaceinvoke v.<org.slf4j.Logger: void info(java.lang.String)>("Shutting down all AsyncDiskService threads...");
// for (Map.Entry e : executors.entrySet()) e.getValue().shutdown();
v = v.<oadd.org.apache.hadoop.util.AsyncDiskService: java.util.HashMap executors>;
v = virtualinvoke v.<java.util.HashMap: java.util.Set entrySet()>();
v = interfaceinvoke v.<java.util.Set: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = interfaceinvoke v.<java.util.Map$Entry: java.lang.Object getValue()>();
virtualinvoke v.<java.util.concurrent.ThreadPoolExecutor: void shutdown()>();
goto label;
label:
return;
}
// Waits up to the given number of milliseconds for ALL executors to
// terminate (the remaining budget is recomputed against a single shared
// deadline before each executor's await). Returns false (0) with a WARN
// log on timeout, true (1) with an INFO log when all terminated.
public synchronized boolean awaitTermination(long) throws java.lang.InterruptedException
{
java.util.HashMap v;
long v, v, v, v, v, v;
boolean v, v;
org.slf4j.Logger v, v;
java.util.Iterator v;
java.util.Set v;
java.util.concurrent.TimeUnit v;
oadd.org.apache.hadoop.util.AsyncDiskService v;
java.lang.Object v, v;
v := @this: oadd.org.apache.hadoop.util.AsyncDiskService;
v := @parameter: long;
// deadline = Time.now() + milliseconds
v = staticinvoke <oadd.org.apache.hadoop.util.Time: long now()>();
v = v + v;
v = v.<oadd.org.apache.hadoop.util.AsyncDiskService: java.util.HashMap executors>;
v = virtualinvoke v.<java.util.HashMap: java.util.Set entrySet()>();
v = interfaceinvoke v.<java.util.Set: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = interfaceinvoke v.<java.util.Map$Entry: java.lang.Object getValue()>();
// remaining = max(deadline - Time.now(), 0); clamped so a past deadline
// still yields a legal (zero) wait instead of a negative timeout.
v = staticinvoke <oadd.org.apache.hadoop.util.Time: long now()>();
v = v - v;
v = staticinvoke <java.lang.Math: long max(long,long)>(v, 0L);
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
v = virtualinvoke v.<java.util.concurrent.ThreadPoolExecutor: boolean awaitTermination(long,java.util.concurrent.TimeUnit)>(v, v);
// Terminated in time -> continue with next executor.
if v != 0 goto label;
// Timed out on this executor: warn and report failure for the whole call.
v = <oadd.org.apache.hadoop.util.AsyncDiskService: org.slf4j.Logger LOG>;
interfaceinvoke v.<org.slf4j.Logger: void warn(java.lang.String)>("AsyncDiskService awaitTermination timeout.");
return 0;
label:
v = <oadd.org.apache.hadoop.util.AsyncDiskService: org.slf4j.Logger LOG>;
interfaceinvoke v.<org.slf4j.Logger: void info(java.lang.String)>("All AsyncDiskService threads are terminated.");
return 1;
}
// Immediately shuts down every executor via ThreadPoolExecutor.shutdownNow()
// and returns the aggregated list of tasks that were awaiting execution.
public synchronized java.util.List shutdownNow()
{
org.slf4j.Logger v;
java.util.Iterator v;
java.util.Set v;
java.util.HashMap v;
java.util.ArrayList v;
java.util.List v;
oadd.org.apache.hadoop.util.AsyncDiskService v;
java.lang.Object v, v;
boolean v;
v := @this: oadd.org.apache.hadoop.util.AsyncDiskService;
v = <oadd.org.apache.hadoop.util.AsyncDiskService: org.slf4j.Logger LOG>;
interfaceinvoke v.<org.slf4j.Logger: void info(java.lang.String)>("Shutting down all AsyncDiskService threads immediately...");
// Accumulator for the never-commenced Runnables from every pool.
v = new java.util.ArrayList;
specialinvoke v.<java.util.ArrayList: void <init>()>();
v = v.<oadd.org.apache.hadoop.util.AsyncDiskService: java.util.HashMap executors>;
v = virtualinvoke v.<java.util.HashMap: java.util.Set entrySet()>();
v = interfaceinvoke v.<java.util.Set: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = interfaceinvoke v.<java.util.Map$Entry: java.lang.Object getValue()>();
// list.addAll(executor.shutdownNow())
v = virtualinvoke v.<java.util.concurrent.ThreadPoolExecutor: java.util.List shutdownNow()>();
interfaceinvoke v.<java.util.List: boolean addAll(java.util.Collection)>(v);
goto label;
label:
return v;
}
// Static initializer: LOG = LoggerFactory.getLogger(AsyncDiskService.class).
static void <clinit>()
{
org.slf4j.Logger v;
v = staticinvoke <org.slf4j.LoggerFactory: org.slf4j.Logger getLogger(java.lang.Class)>(class "Loadd/org/apache/hadoop/util/AsyncDiskService;");
<oadd.org.apache.hadoop.util.AsyncDiskService: org.slf4j.Logger LOG> = v;
return;
}
}