public class org.apache.hadoop.hive.kafka.KafkaInputFormat extends org.apache.hadoop.mapreduce.InputFormat implements org.apache.hadoop.mapred.InputFormat, org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface
{
private static final org.slf4j.Logger LOG;
public void <init>()
{
org.apache.hadoop.hive.kafka.KafkaInputFormat v0;
v0 := @this: org.apache.hadoop.hive.kafka.KafkaInputFormat;
specialinvoke v0.<org.apache.hadoop.mapreduce.InputFormat: void <init>()>();
return;
}
public org.apache.hadoop.mapred.InputSplit[] getSplits(org.apache.hadoop.mapred.JobConf, int) throws java.io.IOException
{
java.lang.Object[] v9;
java.io.IOException v6;
org.apache.hadoop.mapred.InputSplit[] v8;
org.apache.hadoop.mapred.JobConf v1;
java.util.List v3;
java.lang.InterruptedException v4;
java.lang.Thread v5;
int v2, v7;
org.apache.hadoop.hive.kafka.KafkaInputFormat v0;
v0 := @this: org.apache.hadoop.hive.kafka.KafkaInputFormat;
v1 := @parameter0: org.apache.hadoop.mapred.JobConf;
v2 := @parameter1: int;
label1:
v3 = specialinvoke v0.<org.apache.hadoop.hive.kafka.KafkaInputFormat: java.util.List computeSplits(org.apache.hadoop.conf.Configuration)>(v1);
label2:
goto label4;
label3:
v4 := @caughtexception;
v5 = staticinvoke <java.lang.Thread: java.lang.Thread currentThread()>();
virtualinvoke v5.<java.lang.Thread: void interrupt()>();
v6 = new java.io.IOException;
specialinvoke v6.<java.io.IOException: void <init>(java.lang.Throwable)>(v4);
throw v6;
label4:
v7 = interfaceinvoke v3.<java.util.List: int size()>();
v8 = newarray (org.apache.hadoop.mapred.InputSplit)[v7];
v9 = interfaceinvoke v3.<java.util.List: java.lang.Object[] toArray(java.lang.Object[])>(v8);
return v9;
catch java.lang.InterruptedException from label1 to label2 with label3;
}
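// A hedged Java-source reading of the method above (a sketch; parameter and local
// names are assumptions, since the bytecode does not preserve them, and the list
// element type is inferred from the sibling mapreduce getSplits):
//
//   @Override public org.apache.hadoop.mapred.InputSplit[] getSplits(JobConf jobConf, int numSplits)
//       throws IOException {
//     List<KafkaInputSplit> splits;
//     try {
//       splits = computeSplits(jobConf);
//     } catch (InterruptedException e) {
//       Thread.currentThread().interrupt();  // restore the interrupt flag before rethrowing
//       throw new IOException(e);
//     }
//     return splits.toArray(new org.apache.hadoop.mapred.InputSplit[splits.size()]);
//   }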
private static java.util.List buildFullScanFromKafka(java.lang.String, org.apache.kafka.clients.consumer.KafkaConsumer, org.apache.hadoop.fs.Path[], int)
{
java.util.function.Function v15, v22;
java.util.Map v9, v10;
int v3;
java.util.function.BiConsumer v19, v20;
java.lang.String v0;
java.util.stream.Collector v17, v24;
boolean v12;
org.apache.kafka.clients.consumer.KafkaConsumer v1;
org.slf4j.Logger v11, v13;
java.util.function.Predicate v5;
java.lang.RuntimeException v8;
org.apache.hadoop.fs.Path[] v2;
java.lang.Exception v7;
java.util.stream.Stream v14, v16, v21, v23;
org.apache.hadoop.hive.kafka.RetryUtils$Task v4;
java.lang.Object v6, v18, v25;
v0 := @parameter0: java.lang.String;
v1 := @parameter1: org.apache.kafka.clients.consumer.KafkaConsumer;
v2 := @parameter2: org.apache.hadoop.fs.Path[];
v3 := @parameter3: int;
v4 = staticinvoke <org.apache.hadoop.hive.kafka.KafkaInputFormat$lambda_buildFullScanFromKafka_0__2: org.apache.hadoop.hive.kafka.RetryUtils$Task bootstrap$(java.lang.String,org.apache.kafka.clients.consumer.KafkaConsumer)>(v0, v1);
label1:
v5 = staticinvoke <org.apache.hadoop.hive.kafka.KafkaInputFormat$lambda_buildFullScanFromKafka_1__3: java.util.function.Predicate bootstrap$()>();
v6 = staticinvoke <org.apache.hadoop.hive.kafka.RetryUtils: java.lang.Object retry(org.apache.hadoop.hive.kafka.RetryUtils$Task,java.util.function.Predicate,int)>(v4, v5, v3);
label2:
goto label4;
label3:
v7 := @caughtexception;
v8 = new java.lang.RuntimeException;
specialinvoke v8.<java.lang.RuntimeException: void <init>(java.lang.Throwable)>(v7);
throw v8;
label4:
v9 = virtualinvoke v1.<org.apache.kafka.clients.consumer.KafkaConsumer: java.util.Map beginningOffsets(java.util.Collection)>(v6);
v10 = virtualinvoke v1.<org.apache.kafka.clients.consumer.KafkaConsumer: java.util.Map endOffsets(java.util.Collection)>(v6);
v11 = <org.apache.hadoop.hive.kafka.KafkaInputFormat: org.slf4j.Logger LOG>;
v12 = interfaceinvoke v11.<org.slf4j.Logger: boolean isDebugEnabled()>();
if v12 == 0 goto label5;
v13 = <org.apache.hadoop.hive.kafka.KafkaInputFormat: org.slf4j.Logger LOG>;
v14 = interfaceinvoke v6.<java.util.List: java.util.stream.Stream stream()>();
v15 = staticinvoke <org.apache.hadoop.hive.kafka.KafkaInputFormat$toString__5: java.util.function.Function bootstrap$()>();
v16 = interfaceinvoke v14.<java.util.stream.Stream: java.util.stream.Stream map(java.util.function.Function)>(v15);
v17 = staticinvoke <java.util.stream.Collectors: java.util.stream.Collector joining(java.lang.CharSequence)>(",");
v18 = interfaceinvoke v16.<java.util.stream.Stream: java.lang.Object collect(java.util.stream.Collector)>(v17);
interfaceinvoke v13.<org.slf4j.Logger: void info(java.lang.String,java.lang.Object)>("Found the following partitions [{}]", v18);
v19 = staticinvoke <org.apache.hadoop.hive.kafka.KafkaInputFormat$lambda_buildFullScanFromKafka_2__6: java.util.function.BiConsumer bootstrap$()>();
interfaceinvoke v9.<java.util.Map: void forEach(java.util.function.BiConsumer)>(v19);
v20 = staticinvoke <org.apache.hadoop.hive.kafka.KafkaInputFormat$lambda_buildFullScanFromKafka_3__7: java.util.function.BiConsumer bootstrap$()>();
interfaceinvoke v10.<java.util.Map: void forEach(java.util.function.BiConsumer)>(v20);
label5:
v21 = interfaceinvoke v6.<java.util.List: java.util.stream.Stream stream()>();
v22 = staticinvoke <org.apache.hadoop.hive.kafka.KafkaInputFormat$lambda_buildFullScanFromKafka_4__4: java.util.function.Function bootstrap$(java.util.Map,java.util.Map,org.apache.hadoop.fs.Path[])>(v9, v10, v2);
v23 = interfaceinvoke v21.<java.util.stream.Stream: java.util.stream.Stream map(java.util.function.Function)>(v22);
v24 = staticinvoke <java.util.stream.Collectors: java.util.stream.Collector toList()>();
v25 = interfaceinvoke v23.<java.util.stream.Stream: java.lang.Object collect(java.util.stream.Collector)>(v24);
return v25;
catch java.lang.Exception from label1 to label2 with label3;
}
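// A hedged sketch of the full-scan builder above. The lambda bodies live in the
// compiler-generated classes referenced by bootstrap$ and are not shown in this
// listing, so they are reconstructed from their captured arguments and call sites;
// helper names marked hypothetical are not in the bytecode:
//
//   private static List<KafkaInputSplit> buildFullScanFromKafka(String topic,
//       KafkaConsumer<byte[], byte[]> consumer, Path[] tablePaths, int maxTries) {
//     final List<TopicPartition> partitions;
//     try {
//       // fetch partition metadata, retrying up to maxTries while the predicate allows it
//       partitions = RetryUtils.retry(() -> fetchTopicPartitions(topic, consumer),
//           error -> shouldRetry(error),  // shouldRetry is hypothetical; real test in lambda_buildFullScanFromKafka_1
//           maxTries);
//     } catch (Exception e) {
//       throw new RuntimeException(e);
//     }
//     Map<TopicPartition, Long> startOffsets = consumer.beginningOffsets(partitions);
//     Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
//     if (LOG.isDebugEnabled()) {
//       LOG.info("Found the following partitions [{}]",
//           partitions.stream().map(TopicPartition::toString).collect(Collectors.joining(",")));
//       startOffsets.forEach((tp, off) -> logStart(tp, off));  // hypothetical log helpers standing in
//       endOffsets.forEach((tp, off) -> logEnd(tp, off));      // for lambda_buildFullScanFromKafka_2 and _3
//     }
//     // one split per partition, combining both offset maps with the table path
//     return partitions.stream()
//         .map(tp -> splitFor(tp, startOffsets, endOffsets, tablePaths))  // splitFor is hypothetical; real body in lambda_buildFullScanFromKafka_4
//         .collect(Collectors.toList());
//   }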
private java.util.List computeSplits(org.apache.hadoop.conf.Configuration) throws java.io.IOException, java.lang.InterruptedException
{
org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc v30;
java.util.concurrent.Future v18, v34;
org.apache.hadoop.conf.Configuration v1;
java.util.stream.Collector v40;
boolean v29;
com.google.common.collect.ImmutableMap v27;
java.util.function.Predicate v38;
java.lang.Exception v21, v42;
java.util.stream.Stream v37, v39;
java.lang.Throwable v44, v45;
java.util.concurrent.Callable v17, v33;
long v10;
org.apache.hadoop.hive.kafka.KafkaTableProperties v5, v8, v11;
int v13, v14;
java.lang.String v6, v7, v9, v12, v28, v32;
java.util.concurrent.ExecutorService v2;
org.apache.kafka.clients.consumer.KafkaConsumer v3;
org.apache.hadoop.hive.kafka.KafkaScanTrimmer v26;
java.util.Properties v4;
org.slf4j.Logger v22, v31, v43;
java.io.IOException v23;
org.apache.hadoop.fs.Path[] v16;
com.google.common.collect.ImmutableMap$Builder v24;
java.util.concurrent.TimeUnit v19, v35;
java.util.function.Consumer v25;
org.apache.hadoop.mapred.JobConf v15;
org.apache.hadoop.hive.kafka.KafkaInputFormat v0;
java.lang.Object v20, v36, v41;
v0 := @this: org.apache.hadoop.hive.kafka.KafkaInputFormat;
v1 := @parameter0: org.apache.hadoop.conf.Configuration;
v2 = staticinvoke <java.util.concurrent.Executors: java.util.concurrent.ExecutorService newSingleThreadExecutor()>();
label1:
v3 = new org.apache.kafka.clients.consumer.KafkaConsumer;
v4 = staticinvoke <org.apache.hadoop.hive.kafka.KafkaUtils: java.util.Properties consumerProperties(org.apache.hadoop.conf.Configuration)>(v1);
specialinvoke v3.<org.apache.kafka.clients.consumer.KafkaConsumer: void <init>(java.util.Properties)>(v4);
label2:
v5 = <org.apache.hadoop.hive.kafka.KafkaTableProperties: org.apache.hadoop.hive.kafka.KafkaTableProperties HIVE_KAFKA_TOPIC>;
v6 = virtualinvoke v5.<org.apache.hadoop.hive.kafka.KafkaTableProperties: java.lang.String getName()>();
v7 = virtualinvoke v1.<org.apache.hadoop.conf.Configuration: java.lang.String get(java.lang.String)>(v6);
v8 = <org.apache.hadoop.hive.kafka.KafkaTableProperties: org.apache.hadoop.hive.kafka.KafkaTableProperties KAFKA_FETCH_METADATA_TIMEOUT>;
v9 = virtualinvoke v8.<org.apache.hadoop.hive.kafka.KafkaTableProperties: java.lang.String getName()>();
v10 = virtualinvoke v1.<org.apache.hadoop.conf.Configuration: long getLong(java.lang.String,long)>(v9, -1L);
v11 = <org.apache.hadoop.hive.kafka.KafkaTableProperties: org.apache.hadoop.hive.kafka.KafkaTableProperties MAX_RETRIES>;
v12 = virtualinvoke v11.<org.apache.hadoop.hive.kafka.KafkaTableProperties: java.lang.String getName()>();
v13 = (int) -1;
v14 = virtualinvoke v1.<org.apache.hadoop.conf.Configuration: int getInt(java.lang.String,int)>(v12, v13);
v15 = new org.apache.hadoop.mapred.JobConf;
specialinvoke v15.<org.apache.hadoop.mapred.JobConf: void <init>(org.apache.hadoop.conf.Configuration)>(v1);
v16 = staticinvoke <org.apache.hadoop.mapred.FileInputFormat: org.apache.hadoop.fs.Path[] getInputPaths(org.apache.hadoop.mapred.JobConf)>(v15);
v17 = staticinvoke <org.apache.hadoop.hive.kafka.KafkaInputFormat$lambda_computeSplits_5__8: java.util.concurrent.Callable bootstrap$(java.lang.String,org.apache.kafka.clients.consumer.KafkaConsumer,org.apache.hadoop.fs.Path[],int)>(v7, v3, v16, v14);
v18 = interfaceinvoke v2.<java.util.concurrent.ExecutorService: java.util.concurrent.Future submit(java.util.concurrent.Callable)>(v17);
label3:
v19 = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
v20 = interfaceinvoke v18.<java.util.concurrent.Future: java.lang.Object get(long,java.util.concurrent.TimeUnit)>(v10, v19);
label4:
goto label6;
label5:
v21 := @caughtexception;
interfaceinvoke v18.<java.util.concurrent.Future: boolean cancel(boolean)>(1);
v22 = <org.apache.hadoop.hive.kafka.KafkaInputFormat: org.slf4j.Logger LOG>;
interfaceinvoke v22.<org.slf4j.Logger: void error(java.lang.String,java.lang.Throwable)>("can not generate full scan split", v21);
v23 = new java.io.IOException;
specialinvoke v23.<java.io.IOException: void <init>(java.lang.Throwable)>(v21);
throw v23;
label6:
v24 = new com.google.common.collect.ImmutableMap$Builder;
specialinvoke v24.<com.google.common.collect.ImmutableMap$Builder: void <init>()>();
v25 = staticinvoke <org.apache.hadoop.hive.kafka.KafkaInputFormat$lambda_computeSplits_6__9: java.util.function.Consumer bootstrap$(com.google.common.collect.ImmutableMap$Builder)>(v24);
interfaceinvoke v20.<java.util.List: void forEach(java.util.function.Consumer)>(v25);
v26 = new org.apache.hadoop.hive.kafka.KafkaScanTrimmer;
v27 = virtualinvoke v24.<com.google.common.collect.ImmutableMap$Builder: com.google.common.collect.ImmutableMap build()>();
specialinvoke v26.<org.apache.hadoop.hive.kafka.KafkaScanTrimmer: void <init>(java.util.Map,org.apache.kafka.clients.consumer.KafkaConsumer)>(v27, v3);
v28 = virtualinvoke v1.<org.apache.hadoop.conf.Configuration: java.lang.String get(java.lang.String)>("hive.io.filter.expr.serialized");
if v28 == null goto label13;
v29 = virtualinvoke v28.<java.lang.String: boolean isEmpty()>();
if v29 != 0 goto label13;
v30 = staticinvoke <org.apache.hadoop.hive.ql.exec.SerializationUtilities: org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc deserializeExpression(java.lang.String)>(v28);
v31 = <org.apache.hadoop.hive.kafka.KafkaInputFormat: org.slf4j.Logger LOG>;
v32 = virtualinvoke v30.<org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc: java.lang.String getExprString()>();
interfaceinvoke v31.<org.slf4j.Logger: void info(java.lang.String,java.lang.Object)>("Kafka trimmer working on Filter tree {}", v32);
v33 = staticinvoke <org.apache.hadoop.hive.kafka.KafkaInputFormat$lambda_computeSplits_7__10: java.util.concurrent.Callable bootstrap$(org.apache.hadoop.hive.kafka.KafkaScanTrimmer,org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc)>(v26, v30);
v34 = interfaceinvoke v2.<java.util.concurrent.ExecutorService: java.util.concurrent.Future submit(java.util.concurrent.Callable)>(v33);
label7:
v35 = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
v36 = interfaceinvoke v34.<java.util.concurrent.Future: java.lang.Object get(long,java.util.concurrent.TimeUnit)>(v10, v35);
v37 = interfaceinvoke v36.<java.util.List: java.util.stream.Stream stream()>();
v38 = staticinvoke <org.apache.hadoop.hive.kafka.KafkaInputFormat$lambda_computeSplits_8__11: java.util.function.Predicate bootstrap$()>();
v39 = interfaceinvoke v37.<java.util.stream.Stream: java.util.stream.Stream filter(java.util.function.Predicate)>(v38);
v40 = staticinvoke <java.util.stream.Collectors: java.util.stream.Collector toList()>();
v41 = interfaceinvoke v39.<java.util.stream.Stream: java.lang.Object collect(java.util.stream.Collector)>(v40);
label8:
virtualinvoke v3.<org.apache.kafka.clients.consumer.KafkaConsumer: void close()>();
label9:
interfaceinvoke v2.<java.util.concurrent.ExecutorService: void shutdown()>();
return v41;
label10:
v42 := @caughtexception;
interfaceinvoke v34.<java.util.concurrent.Future: boolean cancel(boolean)>(1);
v43 = <org.apache.hadoop.hive.kafka.KafkaInputFormat: org.slf4j.Logger LOG>;
interfaceinvoke v43.<org.slf4j.Logger: void error(java.lang.String,java.lang.Throwable)>("Had issue with trimmer will return full scan ", v42);
label11:
if v3 == null goto label12;
virtualinvoke v3.<org.apache.kafka.clients.consumer.KafkaConsumer: void close()>();
label12:
interfaceinvoke v2.<java.util.concurrent.ExecutorService: void shutdown()>();
return v20;
label13:
virtualinvoke v3.<org.apache.kafka.clients.consumer.KafkaConsumer: void close()>();
label14:
interfaceinvoke v2.<java.util.concurrent.ExecutorService: void shutdown()>();
return v20;
label15:
v44 := @caughtexception;
throw v44;
label16:
v45 := @caughtexception;
interfaceinvoke v2.<java.util.concurrent.ExecutorService: void shutdown()>();
throw v45;
catch java.util.concurrent.TimeoutException from label3 to label4 with label5;
catch java.util.concurrent.ExecutionException from label3 to label4 with label5;
catch java.util.concurrent.ExecutionException from label7 to label8 with label10;
catch java.util.concurrent.TimeoutException from label7 to label8 with label10;
catch java.lang.Throwable from label to label with label;
catch java.lang.Throwable from label to label with label;
catch java.lang.Throwable from label to label with label;
catch java.lang.Throwable from label to label with label;
catch java.lang.Throwable from label to label with label;
}
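// A hedged sketch of computeSplits above. The structure follows the exception table:
// both Kafka round-trips run on a single-thread executor bounded by the table's
// metadata-fetch timeout, and the filter-trimming path degrades to the full scan on
// failure. Local names are assumptions; helpers marked hypothetical stand in for the
// compiler-generated lambda bodies, which are not shown in this listing:
//
//   private List<KafkaInputSplit> computeSplits(Configuration configuration)
//       throws IOException, InterruptedException {
//     ExecutorService execService = Executors.newSingleThreadExecutor();
//     try (KafkaConsumer consumer = new KafkaConsumer(KafkaUtils.consumerProperties(configuration))) {
//       String topic = configuration.get(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName());
//       long timeoutMs = configuration.getLong(KafkaTableProperties.KAFKA_FETCH_METADATA_TIMEOUT.getName(), -1L);
//       int maxTries = configuration.getInt(KafkaTableProperties.MAX_RETRIES.getName(), -1);
//       Path[] tablePaths = FileInputFormat.getInputPaths(new JobConf(configuration));
//
//       Future<List<KafkaInputSplit>> fullScanFuture =
//           execService.submit(() -> buildFullScanFromKafka(topic, consumer, tablePaths, maxTries));
//       List<KafkaInputSplit> fullScan;
//       try {
//         fullScan = fullScanFuture.get(timeoutMs, TimeUnit.MILLISECONDS);
//       } catch (TimeoutException | ExecutionException e) {
//         fullScanFuture.cancel(true);
//         LOG.error("can not generate full scan split", e);
//         throw new IOException(e);  // without the full scan there is nothing to return
//       }
//
//       ImmutableMap.Builder<TopicPartition, KafkaInputSplit> builder = new ImmutableMap.Builder<>();
//       fullScan.forEach(split -> builder.put(partitionOf(split), split));  // partitionOf is hypothetical; body in lambda_computeSplits_6
//       KafkaScanTrimmer trimmer = new KafkaScanTrimmer(builder.build(), consumer);
//
//       String serializedFilter = configuration.get("hive.io.filter.expr.serialized");
//       if (serializedFilter != null && !serializedFilter.isEmpty()) {
//         ExprNodeGenericFuncDesc filter = SerializationUtilities.deserializeExpression(serializedFilter);
//         LOG.info("Kafka trimmer working on Filter tree {}", filter.getExprString());
//         Future<List<KafkaInputSplit>> trimmedFuture =
//             execService.submit(() -> trimScan(trimmer, filter));  // trimScan is hypothetical; body in lambda_computeSplits_7
//         try {
//           return trimmedFuture.get(timeoutMs, TimeUnit.MILLISECONDS).stream()
//               .filter(split -> isNonEmpty(split))  // isNonEmpty is hypothetical; predicate in lambda_computeSplits_8
//               .collect(Collectors.toList());
//         } catch (TimeoutException | ExecutionException e) {
//           trimmedFuture.cancel(true);
//           LOG.error("Had issue with trimmer will return full scan ", e);
//           return fullScan;
//         }
//       }
//       return fullScan;  // no pushed-down filter: keep one split per partition
//     } finally {
//       execService.shutdown();
//     }
//   }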
private static java.util.List fetchTopicPartitions(java.lang.String, org.apache.kafka.clients.consumer.KafkaConsumer)
{
org.apache.kafka.clients.consumer.KafkaConsumer v1;
java.util.function.Function v4;
java.util.List v2;
java.util.stream.Stream v3, v5;
java.lang.Object v7;
java.lang.String v0;
java.util.stream.Collector v6;
v0 := @parameter0: java.lang.String;
v1 := @parameter1: org.apache.kafka.clients.consumer.KafkaConsumer;
v2 = virtualinvoke v1.<org.apache.kafka.clients.consumer.KafkaConsumer: java.util.List partitionsFor(java.lang.String)>(v0);
v3 = interfaceinvoke v2.<java.util.List: java.util.stream.Stream stream()>();
v4 = staticinvoke <org.apache.hadoop.hive.kafka.KafkaInputFormat$lambda_fetchTopicPartitions_9__12: java.util.function.Function bootstrap$(java.lang.String)>(v0);
v5 = interfaceinvoke v3.<java.util.stream.Stream: java.util.stream.Stream map(java.util.function.Function)>(v4);
v6 = staticinvoke <java.util.stream.Collectors: java.util.stream.Collector toList()>();
v7 = interfaceinvoke v5.<java.util.stream.Stream: java.lang.Object collect(java.util.stream.Collector)>(v6);
return v7;
}
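// A hedged sketch; the mapping lambda captures the topic string, so it presumably
// pairs it with each partition id (names and generic types are assumptions):
//
//   private static List<TopicPartition> fetchTopicPartitions(String topic,
//       KafkaConsumer<byte[], byte[]> consumer) {
//     return consumer.partitionsFor(topic).stream()
//         .map(partitionInfo -> new TopicPartition(topic, partitionInfo.partition()))
//         .collect(Collectors.toList());
//   }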
public org.apache.hadoop.mapred.RecordReader getRecordReader(org.apache.hadoop.mapred.InputSplit, org.apache.hadoop.mapred.JobConf, org.apache.hadoop.mapred.Reporter)
{
org.apache.hadoop.mapred.JobConf v2;
org.apache.hadoop.hive.kafka.VectorizedKafkaRecordReader v5;
org.apache.hadoop.hive.kafka.KafkaRecordReader v6;
org.apache.hadoop.mapred.Reporter v3;
org.apache.hadoop.hive.kafka.KafkaInputFormat v0;
org.apache.hadoop.mapred.InputSplit v1;
boolean v4;
v0 := @this: org.apache.hadoop.hive.kafka.KafkaInputFormat;
v1 := @parameter0: org.apache.hadoop.mapred.InputSplit;
v2 := @parameter1: org.apache.hadoop.mapred.JobConf;
v3 := @parameter2: org.apache.hadoop.mapred.Reporter;
v4 = staticinvoke <org.apache.hadoop.hive.ql.exec.Utilities: boolean getIsVectorized(org.apache.hadoop.conf.Configuration)>(v2);
if v4 == 0 goto label1;
v5 = new org.apache.hadoop.hive.kafka.VectorizedKafkaRecordReader;
specialinvoke v5.<org.apache.hadoop.hive.kafka.VectorizedKafkaRecordReader: void <init>(org.apache.hadoop.hive.kafka.KafkaInputSplit,org.apache.hadoop.conf.Configuration)>(v1, v2);
return v5;
label1:
v6 = new org.apache.hadoop.hive.kafka.KafkaRecordReader;
specialinvoke v6.<org.apache.hadoop.hive.kafka.KafkaRecordReader: void <init>(org.apache.hadoop.hive.kafka.KafkaInputSplit,org.apache.hadoop.conf.Configuration)>(v1, v2);
return v6;
}
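// A hedged sketch of the mapred-side reader factory above. The casts are implied by
// the constructor signatures, which take a KafkaInputSplit:
//
//   @Override public org.apache.hadoop.mapred.RecordReader getRecordReader(
//       org.apache.hadoop.mapred.InputSplit split, JobConf jobConf, Reporter reporter) {
//     if (Utilities.getIsVectorized(jobConf)) {
//       return new VectorizedKafkaRecordReader((KafkaInputSplit) split, jobConf);
//     }
//     return new KafkaRecordReader((KafkaInputSplit) split, jobConf);
//   }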
public java.util.List getSplits(org.apache.hadoop.mapreduce.JobContext) throws java.io.IOException, java.lang.InterruptedException
{
java.util.function.Function v5;
java.util.List v3;
java.util.stream.Stream v4, v6;
org.apache.hadoop.mapreduce.JobContext v1;
org.apache.hadoop.conf.Configuration v2;
org.apache.hadoop.hive.kafka.KafkaInputFormat v0;
java.lang.Object v8;
java.util.stream.Collector v7;
v0 := @this: org.apache.hadoop.hive.kafka.KafkaInputFormat;
v1 := @parameter0: org.apache.hadoop.mapreduce.JobContext;
v2 = interfaceinvoke v1.<org.apache.hadoop.mapreduce.JobContext: org.apache.hadoop.conf.Configuration getConfiguration()>();
v3 = specialinvoke v0.<org.apache.hadoop.hive.kafka.KafkaInputFormat: java.util.List computeSplits(org.apache.hadoop.conf.Configuration)>(v2);
v4 = interfaceinvoke v3.<java.util.List: java.util.stream.Stream stream()>();
v5 = staticinvoke <org.apache.hadoop.hive.kafka.KafkaInputFormat$lambda_getSplits_10__13: java.util.function.Function bootstrap$()>();
v6 = interfaceinvoke v4.<java.util.stream.Stream: java.util.stream.Stream map(java.util.function.Function)>(v5);
v7 = staticinvoke <java.util.stream.Collectors: java.util.stream.Collector toList()>();
v8 = interfaceinvoke v6.<java.util.stream.Stream: java.lang.Object collect(java.util.stream.Collector)>(v7);
return v8;
}
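// A hedged sketch; the mapping lambda takes no captured arguments, so it presumably
// just widens each KafkaInputSplit to the mapreduce InputSplit type:
//
//   @Override public List<org.apache.hadoop.mapreduce.InputSplit> getSplits(JobContext context)
//       throws IOException, InterruptedException {
//     return computeSplits(context.getConfiguration()).stream()
//         .map(split -> (org.apache.hadoop.mapreduce.InputSplit) split)
//         .collect(Collectors.toList());
//   }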
public org.apache.hadoop.mapreduce.RecordReader createRecordReader(org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext)
{
org.apache.hadoop.mapreduce.TaskAttemptContext v2;
org.apache.hadoop.mapreduce.InputSplit v1;
org.apache.hadoop.hive.kafka.KafkaRecordReader v3;
org.apache.hadoop.hive.kafka.KafkaInputFormat v0;
v0 := @this: org.apache.hadoop.hive.kafka.KafkaInputFormat;
v1 := @parameter0: org.apache.hadoop.mapreduce.InputSplit;
v2 := @parameter1: org.apache.hadoop.mapreduce.TaskAttemptContext;
v3 = new org.apache.hadoop.hive.kafka.KafkaRecordReader;
specialinvoke v3.<org.apache.hadoop.hive.kafka.KafkaRecordReader: void <init>()>();
return v3;
}
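// A hedged sketch; the mapreduce RecordReader contract defers setup to
// initialize(InputSplit, TaskAttemptContext), so the no-arg constructor suffices here:
//
//   @Override public org.apache.hadoop.mapreduce.RecordReader createRecordReader(
//       org.apache.hadoop.mapreduce.InputSplit split, TaskAttemptContext context) {
//     return new KafkaRecordReader();
//   }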
public org.apache.hadoop.hive.ql.exec.vector.VectorizedSupport$Support[] getSupportedFeatures()
{
org.apache.hadoop.hive.kafka.KafkaInputFormat v0;
org.apache.hadoop.hive.ql.exec.vector.VectorizedSupport$Support[] v1;
v0 := @this: org.apache.hadoop.hive.kafka.KafkaInputFormat;
v1 = newarray (org.apache.hadoop.hive.ql.exec.vector.VectorizedSupport$Support)[0];
return v1;
}
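// Equivalent Java: the format advertises no optional vectorization features.
//
//   @Override public VectorizedSupport.Support[] getSupportedFeatures() {
//     return new VectorizedSupport.Support[0];
//   }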
static void <clinit>()
{
org.slf4j.Logger v0;
v0 = staticinvoke <org.slf4j.LoggerFactory: org.slf4j.Logger getLogger(java.lang.Class)>(class "Lorg/apache/hadoop/hive/kafka/KafkaInputFormat;");
<org.apache.hadoop.hive.kafka.KafkaInputFormat: org.slf4j.Logger LOG> = v0;
return;
}
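// Equivalent Java field declaration for the static initializer above:
//
//   private static final Logger LOG = LoggerFactory.getLogger(KafkaInputFormat.class);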
}