// Soot Jimple IR (decompiled bytecode) of Hive's Kafka RecordReader.
// Bridges BOTH Hadoop APIs: extends the new mapreduce.RecordReader and
// implements the old mapred.RecordReader, so the same class serves either
// input-format path. Keys are NullWritable; values are KafkaWritable.
// NOTE(review): the package "org.apache.kafkaesqueesqueesque" looks like a
// corrupted/shaded form of "org.apache.kafka" — confirm against the build.
public class org.apache.hadoop.hive.kafka.KafkaRecordReader extends org.apache.hadoop.mapreduce.RecordReader implements org.apache.hadoop.mapred.RecordReader
{
// Class logger, assigned in <clinit> below.
private static final org.slf4j.Logger LOG;
// Lazily created in initConsumer(); null until first use, closed in close().
private org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer consumer;
private org.apache.hadoop.conf.Configuration config;
// Holds the value produced by nextKeyValue() for getCurrentValue().
private org.apache.hadoop.hive.kafka.KafkaWritable currentWritableValue;
// Cursor over ConsumerRecords for the assigned split (raw Iterator in IR).
private java.util.Iterator recordsCursor;
// Expected record count for the split (endOffset - startOffset); drives getProgress().
private long totalNumberRecords;
private long consumedRecords;
private long readBytes;
// Guards one-time initialization; volatile so init-state is visible across threads.
private volatile boolean started;
// No-arg constructor: zero/null all fields; initialization is deferred to
// initialize(InputSplit, TaskAttemptContext) in the mapreduce API path.
public void <init>()
{
org.apache.hadoop.hive.kafka.KafkaRecordReader v;
v := @this: org.apache.hadoop.hive.kafka.KafkaRecordReader;
specialinvoke v.<org.apache.hadoop.mapreduce.RecordReader: void <init>()>();
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer consumer> = null;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.hadoop.conf.Configuration config> = null;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: java.util.Iterator recordsCursor> = null;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long totalNumberRecords> = 0L;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long consumedRecords> = 0L;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long readBytes> = 0L;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: boolean started> = 0;
return;
}
// Lazily builds the KafkaConsumer on first call; no-op when one already
// exists. Consumer properties come from KafkaUtils.consumerProperties(config);
// "bootstrap.servers" must be present (Preconditions.checkNotNull).
private void initConsumer()
{
org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer v, v;
java.util.Properties v;
org.slf4j.Logger v, v;
org.apache.hadoop.hive.kafka.KafkaRecordReader v;
org.apache.hadoop.conf.Configuration v;
java.lang.String v;
v := @this: org.apache.hadoop.hive.kafka.KafkaRecordReader;
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer consumer>;
// Skip everything if a consumer was already created.
if v != null goto label;
v = <org.apache.hadoop.hive.kafka.KafkaRecordReader: org.slf4j.Logger LOG>;
interfaceinvoke v.<org.slf4j.Logger: void info(java.lang.String)>("Initializing Kafka Consumer");
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.hadoop.conf.Configuration config>;
v = staticinvoke <org.apache.hadoop.hive.kafka.KafkaUtils: java.util.Properties consumerProperties(org.apache.hadoop.conf.Configuration)>(v);
v = virtualinvoke v.<java.util.Properties: java.lang.String getProperty(java.lang.String)>("bootstrap.servers");
// Fail fast when the broker list is missing from the derived properties.
staticinvoke <com.google.common.base.Preconditions: java.lang.Object checkNotNull(java.lang.Object,java.lang.Object)>(v, "broker end point can not be null");
v = <org.apache.hadoop.hive.kafka.KafkaRecordReader: org.slf4j.Logger LOG>;
interfaceinvoke v.<org.slf4j.Logger: void info(java.lang.String,java.lang.Object)>("Starting Consumer with Kafka broker string [{}]", v);
v = new org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer;
specialinvoke v.<org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer: void <init>(java.util.Properties)>(v);
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer consumer> = v;
label:
return;
}
// Convenience constructor for the old mapred API path: field defaults plus
// an immediate call to the private initialize(split, config) below.
public void <init>(org.apache.hadoop.hive.kafka.KafkaInputSplit, org.apache.hadoop.conf.Configuration)
{
org.apache.hadoop.hive.kafka.KafkaRecordReader v;
org.apache.hadoop.hive.kafka.KafkaInputSplit v;
org.apache.hadoop.conf.Configuration v;
v := @this: org.apache.hadoop.hive.kafka.KafkaRecordReader;
v := @parameter: org.apache.hadoop.hive.kafka.KafkaInputSplit;
v := @parameter: org.apache.hadoop.conf.Configuration;
specialinvoke v.<org.apache.hadoop.mapreduce.RecordReader: void <init>()>();
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer consumer> = null;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.hadoop.conf.Configuration config> = null;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: java.util.Iterator recordsCursor> = null;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long totalNumberRecords> = 0L;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long consumedRecords> = 0L;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long readBytes> = 0L;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: boolean started> = 0;
specialinvoke v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: void initialize(org.apache.hadoop.hive.kafka.KafkaInputSplit,org.apache.hadoop.conf.Configuration)>(v, v);
return;
}
// One-time setup, guarded by 'started' and synchronized: stores the config,
// validates the split's offset range (0 <= start <= end), adds (end - start)
// to totalNumberRecords, lazily creates the consumer, then builds the record
// cursor: an EmptyIterator for a degenerate range, otherwise a
// KafkaRecordIterator(consumer, topicPartition, start, end, pollTimeout).
// Poll timeout is read from the KAFKA_POLL_TIMEOUT table property (default -1L).
private synchronized void initialize(org.apache.hadoop.hive.kafka.KafkaInputSplit, org.apache.hadoop.conf.Configuration)
{
long v, v, v, v, v, v;
org.apache.hadoop.hive.kafka.KafkaTableProperties v;
byte v, v, v;
java.lang.Long v, v, v;
org.apache.hadoop.conf.Configuration v, v;
int v;
java.lang.String v, v;
org.apache.hadoop.hive.kafka.KafkaRecordReader$EmptyIterator v;
org.apache.hadoop.hive.kafka.KafkaInputSplit v;
org.apache.hadoop.hive.kafka.KafkaRecordIterator v;
boolean v, v;
org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer v;
org.slf4j.Logger v;
java.util.Iterator v;
org.apache.hadoop.hive.kafka.KafkaRecordReader v;
org.apache.kafkaesqueesqueesque.common.TopicPartition v;
v := @this: org.apache.hadoop.hive.kafka.KafkaRecordReader;
v := @parameter: org.apache.hadoop.hive.kafka.KafkaInputSplit;
v := @parameter: org.apache.hadoop.conf.Configuration;
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: boolean started>;
// Already initialized: skip straight to return (idempotent re-entry).
if v != 0 goto label;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.hadoop.conf.Configuration config> = v;
v = virtualinvoke v.<org.apache.hadoop.hive.kafka.KafkaInputSplit: long getStartOffset()>();
v = virtualinvoke v.<org.apache.hadoop.hive.kafka.KafkaInputSplit: long getEndOffset()>();
v = new org.apache.kafkaesqueesqueesque.common.TopicPartition;
v = virtualinvoke v.<org.apache.hadoop.hive.kafka.KafkaInputSplit: java.lang.String getTopic()>();
v = virtualinvoke v.<org.apache.hadoop.hive.kafka.KafkaInputSplit: int getPartition()>();
specialinvoke v.<org.apache.kafkaesqueesqueesque.common.TopicPartition: void <init>(java.lang.String,int)>(v, v);
// Compute the boolean (startOffset >= 0 && startOffset <= endOffset)
// via two long compares; result feeds the checkState below.
v = v cmp 0L;
if v < 0 goto label;
v = v cmp v;
if v > 0 goto label;
v = 1;
goto label;
label:
v = 0;
label:
staticinvoke <com.google.common.base.Preconditions: void checkState(boolean,java.lang.String,long,long)>(v, "Start [%s] has to be positive and Less than or equal to End [%s]", v, v);
// totalNumberRecords += (endOffset - startOffset).
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long totalNumberRecords>;
v = v - v;
v = v + v;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long totalNumberRecords> = v;
specialinvoke v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: void initConsumer()>();
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.kafka.KafkaTableProperties: org.apache.hadoop.hive.kafka.KafkaTableProperties KAFKA_POLL_TIMEOUT>;
v = virtualinvoke v.<org.apache.hadoop.hive.kafka.KafkaTableProperties: java.lang.String getName()>();
// -1L is the sentinel default when the property is absent.
v = virtualinvoke v.<org.apache.hadoop.conf.Configuration: long getLong(java.lang.String,long)>(v, -1L);
v = <org.apache.hadoop.hive.kafka.KafkaRecordReader: org.slf4j.Logger LOG>;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object)>("Consumer poll timeout [{}] ms", v);
// Branch on a long comparison between two of the computed longs.
// NOTE(review): mangled locals hide which pair is compared — presumably
// startOffset cmp endOffset, i.e. an empty range gets the EmptyIterator;
// confirm against the original Hive source.
v = v cmp v;
if v != 0 goto label;
v = new org.apache.hadoop.hive.kafka.KafkaRecordReader$EmptyIterator;
v = v;
specialinvoke v.<org.apache.hadoop.hive.kafka.KafkaRecordReader$EmptyIterator: void <init>()>();
goto label;
label:
v = new org.apache.hadoop.hive.kafka.KafkaRecordIterator;
v = v;
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer consumer>;
// Box start/end offsets for the KafkaRecordIterator(Long, Long) constructor.
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
specialinvoke v.<org.apache.hadoop.hive.kafka.KafkaRecordIterator: void <init>(org.apache.kafkaesqueesqueesque.clients.consumer.Consumer,org.apache.kafkaesqueesqueesque.common.TopicPartition,java.lang.Long,java.lang.Long,long)>(v, v, v, v, v);
label:
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: java.util.Iterator recordsCursor> = v;
// Mark initialization complete last, after all state is in place.
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: boolean started> = 1;
label:
return;
}
// mapreduce-API initialize(): extracts the Configuration from the task
// context and delegates to the private initialize(split, config) above.
// NOTE(review): the InputSplit parameter is passed through where a
// KafkaInputSplit is expected — the cast is implicit in this IR.
public void initialize(org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext)
{
org.apache.hadoop.mapreduce.TaskAttemptContext v;
org.apache.hadoop.mapreduce.InputSplit v;
org.apache.hadoop.hive.kafka.KafkaRecordReader v;
org.apache.hadoop.conf.Configuration v;
v := @this: org.apache.hadoop.hive.kafka.KafkaRecordReader;
v := @parameter: org.apache.hadoop.mapreduce.InputSplit;
v := @parameter: org.apache.hadoop.mapreduce.TaskAttemptContext;
v = interfaceinvoke v.<org.apache.hadoop.mapreduce.TaskAttemptContext: org.apache.hadoop.conf.Configuration getConfiguration()>();
specialinvoke v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: void initialize(org.apache.hadoop.hive.kafka.KafkaInputSplit,org.apache.hadoop.conf.Configuration)>(v, v);
return;
}
// mapred-API next(): when started and the cursor has a record, copies it
// into the supplied KafkaWritable, bumps consumedRecords by 1 and readBytes
// by the record's serializedValueSize(), and returns true; otherwise false.
public boolean next(org.apache.hadoop.io.NullWritable, org.apache.hadoop.hive.kafka.KafkaWritable)
{
org.apache.hadoop.io.NullWritable v;
java.util.Iterator v, v;
org.apache.hadoop.hive.kafka.KafkaRecordReader v;
long v, v, v, v;
org.apache.hadoop.hive.kafka.KafkaWritable v;
int v;
java.lang.Object v;
boolean v, v;
v := @this: org.apache.hadoop.hive.kafka.KafkaRecordReader;
v := @parameter: org.apache.hadoop.io.NullWritable;
v := @parameter: org.apache.hadoop.hive.kafka.KafkaWritable;
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: boolean started>;
// Not initialized -> no records available.
if v == 0 goto label;
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: java.util.Iterator recordsCursor>;
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: java.util.Iterator recordsCursor>;
// Raw Iterator yields Object; treated as a ConsumerRecord below.
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
virtualinvoke v.<org.apache.hadoop.hive.kafka.KafkaWritable: void set(org.apache.kafkaesqueesqueesque.clients.consumer.ConsumerRecord)>(v);
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long consumedRecords>;
v = v + 1L;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long consumedRecords> = v;
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long readBytes>;
v = virtualinvoke v.<org.apache.kafkaesqueesqueesque.clients.consumer.ConsumerRecord: int serializedValueSize()>();
v = v + v;
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long readBytes> = v;
return 1;
label:
return 0;
}
// mapred-API key factory: keys are always the NullWritable singleton.
public org.apache.hadoop.io.NullWritable createKey()
{
org.apache.hadoop.io.NullWritable v;
org.apache.hadoop.hive.kafka.KafkaRecordReader v;
v := @this: org.apache.hadoop.hive.kafka.KafkaRecordReader;
v = staticinvoke <org.apache.hadoop.io.NullWritable: org.apache.hadoop.io.NullWritable get()>();
return v;
}
// mapred-API value factory: a fresh, empty KafkaWritable per call.
public org.apache.hadoop.hive.kafka.KafkaWritable createValue()
{
org.apache.hadoop.hive.kafka.KafkaRecordReader v;
org.apache.hadoop.hive.kafka.KafkaWritable v;
v := @this: org.apache.hadoop.hive.kafka.KafkaRecordReader;
v = new org.apache.hadoop.hive.kafka.KafkaWritable;
specialinvoke v.<org.apache.hadoop.hive.kafka.KafkaWritable: void <init>()>();
return v;
}
// Byte position is meaningless for a Kafka stream; always -1.
public long getPos()
{
org.apache.hadoop.hive.kafka.KafkaRecordReader v;
v := @this: org.apache.hadoop.hive.kafka.KafkaRecordReader;
return -1L;
}
// mapreduce-API advance: allocates a fresh KafkaWritable into
// currentWritableValue, delegates to next(NullWritable, KafkaWritable),
// and clears currentWritableValue back to null on exhaustion.
public boolean nextKeyValue()
{
org.apache.hadoop.io.NullWritable v;
org.apache.hadoop.hive.kafka.KafkaRecordReader v;
org.apache.hadoop.hive.kafka.KafkaWritable v, v;
boolean v;
v := @this: org.apache.hadoop.hive.kafka.KafkaRecordReader;
v = new org.apache.hadoop.hive.kafka.KafkaWritable;
specialinvoke v.<org.apache.hadoop.hive.kafka.KafkaWritable: void <init>()>();
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.hadoop.hive.kafka.KafkaWritable currentWritableValue> = v;
v = staticinvoke <org.apache.hadoop.io.NullWritable: org.apache.hadoop.io.NullWritable get()>();
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.hadoop.hive.kafka.KafkaWritable currentWritableValue>;
v = virtualinvoke v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: boolean next(org.apache.hadoop.io.NullWritable,org.apache.hadoop.hive.kafka.KafkaWritable)>(v, v);
if v == 0 goto label;
return 1;
label:
v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.hadoop.hive.kafka.KafkaWritable currentWritableValue> = null;
return 0;
}
// mapreduce-API current key: always the NullWritable singleton.
public org.apache.hadoop.io.NullWritable getCurrentKey()
{
org.apache.hadoop.io.NullWritable v;
org.apache.hadoop.hive.kafka.KafkaRecordReader v;
v := @this: org.apache.hadoop.hive.kafka.KafkaRecordReader;
v = staticinvoke <org.apache.hadoop.io.NullWritable: org.apache.hadoop.io.NullWritable get()>();
return v;
}
// mapreduce-API current value; Preconditions.checkNotNull throws if called
// before a successful nextKeyValue() (currentWritableValue still null).
public org.apache.hadoop.hive.kafka.KafkaWritable getCurrentValue()
{
java.lang.Object v;
org.apache.hadoop.hive.kafka.KafkaRecordReader v;
org.apache.hadoop.hive.kafka.KafkaWritable v;
v := @this: org.apache.hadoop.hive.kafka.KafkaRecordReader;
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.hadoop.hive.kafka.KafkaWritable currentWritableValue>;
v = staticinvoke <com.google.common.base.Preconditions: java.lang.Object checkNotNull(java.lang.Object)>(v);
return v;
}
// Progress as consumedRecords / totalNumberRecords, clamped:
// 0.0 before any record is consumed, 1.0 once consumed >= total.
public float getProgress()
{
org.apache.hadoop.hive.kafka.KafkaRecordReader v;
long v, v, v, v, v;
byte v, v;
float v, v;
v := @this: org.apache.hadoop.hive.kafka.KafkaRecordReader;
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long consumedRecords>;
v = v cmp 0L;
if v != 0 goto label;
return 0.0F;
label:
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long consumedRecords>;
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long totalNumberRecords>;
v = v cmp v;
if v < 0 goto label;
return 1.0F;
label:
// (float) consumedRecords * 1.0F / (float) totalNumberRecords.
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long consumedRecords>;
v = v * 1.0F;
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long totalNumberRecords>;
v = v / v;
return v;
}
// Shuts down: traces total readBytes, then — if a consumer exists —
// wakeup() (interrupts any blocked poll) followed by close().
public void close()
{
org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer v, v, v;
org.slf4j.Logger v;
java.lang.Long v;
org.apache.hadoop.hive.kafka.KafkaRecordReader v;
long v;
v := @this: org.apache.hadoop.hive.kafka.KafkaRecordReader;
v = <org.apache.hadoop.hive.kafka.KafkaRecordReader: org.slf4j.Logger LOG>;
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: long readBytes>;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void trace(java.lang.String,java.lang.Object)>("total read bytes [{}]", v);
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer consumer>;
if v == null goto label;
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer consumer>;
virtualinvoke v.<org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer: void wakeup()>();
v = v.<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer consumer>;
virtualinvoke v.<org.apache.kafkaesqueesqueesque.clients.consumer.KafkaConsumer: void close()>();
label:
return;
}
// Static initializer: wires the SLF4J logger for this class.
static void <clinit>()
{
org.slf4j.Logger v;
v = staticinvoke <org.slf4j.LoggerFactory: org.slf4j.Logger getLogger(java.lang.Class)>(class "Lorg/apache/hadoop/hive/kafka/KafkaRecordReader;");
<org.apache.hadoop.hive.kafka.KafkaRecordReader: org.slf4j.Logger LOG> = v;
return;
}
}