public class org.apache.drill.exec.store.kafka.KafkaGroupScan extends org.apache.drill.exec.physical.base.AbstractGroupScan
{
// NOTE(review): this is decompiled Jimple IR (Soot). Local variable names were
// collapsed to "v" by the decompiler, so only comments are added here; the code
// itself is left byte-identical.
// SLF4J logger, initialized in <clinit> (not visible in this view).
private static final org.slf4j.Logger logger;
// Per-message size estimate used for cost stats; getScanStats() below uses the
// inlined literal 1024L, which presumably is this constant — TODO confirm.
private static final long MSG_SIZE;
// Storage plugin that owns this scan; source of config and DrillbitContext.
private final org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin;
// Logical scan spec (carries the Kafka topic name, see init()).
private final org.apache.drill.exec.store.kafka.KafkaScanSpec kafkaScanSpec;
// Projected columns (raw List in the IR; element type not visible here).
private final java.util.List columns;
// Row limit pushed down via applyLimit(); see also supportsLimitPushdown().
private final int records;
// Minor-fragment-id -> list of work units, built in applyAssignments().
private org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap assignments;
// Cached operator affinities, lazily built in getOperatorAffinity().
private java.util.List affinities;
// Kafka TopicPartition -> PartitionScanWork, built in init().
private java.util.Map partitionWorkMap;
// Deserialization-style constructor (presumably the Jackson @JsonCreator in the
// original source — TODO confirm): resolves the KafkaStoragePlugin instance from
// the StoragePluginRegistry by its config, then delegates to the full
// (userName, plugin, columns, records, scanSpec) constructor below.
public void <init>(java.lang.String, org.apache.drill.exec.store.kafka.KafkaStoragePluginConfig, java.util.List, int, org.apache.drill.exec.store.kafka.KafkaScanSpec, org.apache.drill.exec.store.StoragePluginRegistry) throws org.apache.drill.common.exceptions.ExecutionSetupException
{
org.apache.drill.exec.store.StoragePluginRegistry v;
org.apache.drill.exec.store.kafka.KafkaScanSpec v;
org.apache.drill.exec.store.StoragePlugin v;
org.apache.drill.exec.store.kafka.KafkaStoragePluginConfig v;
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
java.util.List v;
int v;
java.lang.String v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: java.lang.String;
v := @parameter: org.apache.drill.exec.store.kafka.KafkaStoragePluginConfig;
v := @parameter: java.util.List;
v := @parameter: int;
v := @parameter: org.apache.drill.exec.store.kafka.KafkaScanSpec;
v := @parameter: org.apache.drill.exec.store.StoragePluginRegistry;
// Look up the concrete KafkaStoragePlugin registered for this config.
v = interfaceinvoke v.<org.apache.drill.exec.store.StoragePluginRegistry: org.apache.drill.exec.store.StoragePlugin resolve(org.apache.drill.common.logical.StoragePluginConfig,java.lang.Class)>(v, class "Lorg/apache/drill/exec/store/kafka/KafkaStoragePlugin;");
// Delegate to this(userName, plugin, columns, records, scanSpec).
specialinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: void <init>(java.lang.String,org.apache.drill.exec.store.kafka.KafkaStoragePlugin,java.util.List,int,org.apache.drill.exec.store.kafka.KafkaScanSpec)>(v, v, v, v, v);
return;
}
// Convenience constructor without a user name: passes an empty string ("") to
// the AbstractGroupScan super constructor, stores the fields, and eagerly runs
// init() to discover partitions and offsets.
public void <init>(org.apache.drill.exec.store.kafka.KafkaStoragePlugin, org.apache.drill.exec.store.kafka.KafkaScanSpec, java.util.List, int)
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
org.apache.drill.exec.store.kafka.KafkaStoragePlugin v;
java.util.List v;
org.apache.drill.exec.store.kafka.KafkaScanSpec v;
int v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: org.apache.drill.exec.store.kafka.KafkaStoragePlugin;
v := @parameter: org.apache.drill.exec.store.kafka.KafkaScanSpec;
v := @parameter: java.util.List;
v := @parameter: int;
// No user name available here, hence the empty-string super call.
specialinvoke v.<org.apache.drill.exec.physical.base.AbstractGroupScan: void <init>(java.lang.String)>("");
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin> = v;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.List columns> = v;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: int records> = v;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaScanSpec kafkaScanSpec> = v;
// Eagerly compute partitionWorkMap (partition start/end offsets + affinity).
specialinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: void init()>();
return;
}
// Primary constructor: passes the user name through to AbstractGroupScan,
// stores all scan state, then runs init() to build partitionWorkMap.
public void <init>(java.lang.String, org.apache.drill.exec.store.kafka.KafkaStoragePlugin, java.util.List, int, org.apache.drill.exec.store.kafka.KafkaScanSpec)
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
org.apache.drill.exec.store.kafka.KafkaStoragePlugin v;
java.util.List v;
org.apache.drill.exec.store.kafka.KafkaScanSpec v;
int v;
java.lang.String v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: java.lang.String;
v := @parameter: org.apache.drill.exec.store.kafka.KafkaStoragePlugin;
v := @parameter: java.util.List;
v := @parameter: int;
v := @parameter: org.apache.drill.exec.store.kafka.KafkaScanSpec;
specialinvoke v.<org.apache.drill.exec.physical.base.AbstractGroupScan: void <init>(java.lang.String)>(v);
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin> = v;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.List columns> = v;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: int records> = v;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaScanSpec kafkaScanSpec> = v;
// Eagerly discover partitions/offsets for this topic.
specialinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: void init()>();
return;
}
// Copy constructor: shallow-copies all fields from the given scan, including
// the already-computed assignments and partitionWorkMap, so init() is NOT
// re-run (no new Kafka metadata fetch). Used by getNewWithChildren().
public void <init>(org.apache.drill.exec.store.kafka.KafkaGroupScan)
{
org.apache.drill.exec.store.kafka.KafkaScanSpec v;
org.apache.drill.exec.store.kafka.KafkaGroupScan v, v;
org.apache.drill.exec.store.kafka.KafkaStoragePlugin v;
java.util.List v;
org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap v;
java.util.Map v;
int v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: org.apache.drill.exec.store.kafka.KafkaGroupScan;
specialinvoke v.<org.apache.drill.exec.physical.base.AbstractGroupScan: void <init>(org.apache.drill.exec.physical.base.AbstractGroupScan)>(v);
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin> = v;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.List columns>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.List columns> = v;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: int records>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: int records> = v;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaScanSpec kafkaScanSpec>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaScanSpec kafkaScanSpec> = v;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap assignments>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap assignments> = v;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap> = v;
return;
}
// Copy constructor with a replacement column list (projection pushdown):
// identical to the plain copy constructor except `columns` is taken from the
// second parameter instead of the source scan. Used by clone(List).
public void <init>(org.apache.drill.exec.store.kafka.KafkaGroupScan, java.util.List)
{
org.apache.drill.exec.store.kafka.KafkaScanSpec v;
org.apache.drill.exec.store.kafka.KafkaGroupScan v, v;
org.apache.drill.exec.store.kafka.KafkaStoragePlugin v;
java.util.List v;
org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap v;
java.util.Map v;
int v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: java.util.List;
specialinvoke v.<org.apache.drill.exec.physical.base.AbstractGroupScan: void <init>(org.apache.drill.exec.physical.base.AbstractGroupScan)>(v);
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin> = v;
// `columns` comes from the parameter, not copied from the source scan.
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.List columns> = v;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: int records>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: int records> = v;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaScanSpec kafkaScanSpec>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaScanSpec kafkaScanSpec> = v;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap assignments>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap assignments> = v;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap> = v;
return;
}
// Copy constructor with a replacement record limit (limit pushdown):
// identical to the plain copy constructor except `records` is taken from the
// second parameter. Used by applyLimit(int).
public void <init>(org.apache.drill.exec.store.kafka.KafkaGroupScan, int)
{
org.apache.drill.exec.store.kafka.KafkaScanSpec v;
org.apache.drill.exec.store.kafka.KafkaGroupScan v, v;
org.apache.drill.exec.store.kafka.KafkaStoragePlugin v;
java.util.List v;
org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap v;
java.util.Map v;
int v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: int;
specialinvoke v.<org.apache.drill.exec.physical.base.AbstractGroupScan: void <init>(org.apache.drill.exec.physical.base.AbstractGroupScan)>(v);
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin> = v;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.List columns>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.List columns> = v;
// `records` comes from the parameter, not copied from the source scan.
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: int records> = v;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaScanSpec kafkaScanSpec>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaScanSpec kafkaScanSpec> = v;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap assignments>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap assignments> = v;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap>;
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap> = v;
return;
}
// Builds partitionWorkMap for the scan's topic:
//  1. Maps drillbit hostnames -> endpoints (stream over getBits()).
//  2. Creates a KafkaConsumer (byte[] key/value deserializers) from the
//     plugin's consumer properties.
//  3. Fails with a UserException "Table '%s' does not exist" if listTopics()
//     does not contain the topic.
//  4. Subscribes, polls (5s), waits for partition assignment, then uses
//     seekToBeginning/seekToEnd + position() to record each partition's start
//     and end offsets in two maps.
//  5. For each PartitionInfo, creates a KafkaPartitionScanSpec(topic,
//     partition, startOffset, endOffset) wrapped in a PartitionScanWork, adds
//     an endpoint-affinity entry for every in-sync replica whose host matches
//     a drillbit, and stores the work keyed by TopicPartition.
// The consumer is handed to kafkaStoragePlugin.registerToClose() on both the
// normal and the Throwable path (bytecode-expanded try/finally); any Exception
// during the fetch is rethrown as a dataReadError UserException.
// NOTE(review): labels are anonymous in this IR, so the exact jump targets are
// inferred from statement order — confirm against original source if editing.
private void init()
{
org.apache.drill.exec.store.kafka.KafkaGroupScan$PartitionScanWork v;
org.apache.drill.common.exceptions.UserException v, v;
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
java.lang.Long v, v, v, v;
java.time.Duration v;
java.util.Map v, v;
java.util.stream.Collector v;
org.apache.kafka.common.Node[] v;
java.util.Set v;
org.apache.drill.exec.store.kafka.KafkaStoragePluginConfig v;
java.lang.Exception v;
org.apache.drill.exec.store.kafka.KafkaStoragePlugin v, v, v, v;
java.util.stream.Stream v;
org.apache.kafka.common.Node v;
org.apache.kafka.clients.consumer.KafkaConsumer v, v;
java.lang.Throwable v;
java.lang.Object[] v, v;
org.apache.drill.common.exceptions.UserException$Builder v, v, v, v, v;
long v, v, v, v, v;
org.apache.drill.exec.server.DrillbitContext v;
java.lang.String v, v, v, v, v;
org.apache.drill.exec.store.schedule.EndpointByteMap v;
org.apache.kafka.common.serialization.ByteArrayDeserializer v, v;
java.util.Properties v;
boolean v, v, v, v;
org.apache.kafka.common.TopicPartition v;
java.util.Collection v;
java.util.function.BinaryOperator v;
java.util.List v, v;
java.util.HashMap v, v, v;
java.util.function.Function v, v;
org.apache.drill.exec.store.kafka.KafkaPartitionScanSpec v;
org.apache.drill.exec.store.schedule.EndpointByteMapImpl v;
int v, v, v, v;
org.apache.drill.exec.store.kafka.KafkaScanSpec v;
org.slf4j.Logger v, v, v, v;
java.util.Iterator v, v, v;
java.lang.Object v, v, v, v, v, v, v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
// Fresh partitionWorkMap for this (re)initialization.
v = staticinvoke <org.apache.drill.shaded.guava.com.google.common.collect.Maps: java.util.HashMap newHashMap()>();
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap> = v;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin>;
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaStoragePlugin: org.apache.drill.exec.server.DrillbitContext getContext()>();
// Map host address -> DrillbitEndpoint; the merge function (lambda bootstrap)
// presumably picks one endpoint on duplicate hosts — TODO confirm.
v = virtualinvoke v.<org.apache.drill.exec.server.DrillbitContext: java.util.Collection getBits()>();
v = interfaceinvoke v.<java.util.Collection: java.util.stream.Stream stream()>();
v = staticinvoke <org.apache.drill.exec.store.kafka.KafkaGroupScan$getAddress__2: java.util.function.Function bootstrap$()>();
v = staticinvoke <java.util.function.Function: java.util.function.Function identity()>();
v = staticinvoke <org.apache.drill.exec.store.kafka.KafkaGroupScan$lambda_init_0__3: java.util.function.BinaryOperator bootstrap$()>();
v = staticinvoke <java.util.stream.Collectors: java.util.stream.Collector toMap(java.util.function.Function,java.util.function.Function,java.util.function.BinaryOperator)>(v, v, v);
v = interfaceinvoke v.<java.util.stream.Stream: java.lang.Object collect(java.util.stream.Collector)>(v);
// Start-offset and end-offset maps, keyed by TopicPartition (filled below).
v = staticinvoke <org.apache.drill.shaded.guava.com.google.common.collect.Maps: java.util.HashMap newHashMap()>();
v = staticinvoke <org.apache.drill.shaded.guava.com.google.common.collect.Maps: java.util.HashMap newHashMap()>();
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaScanSpec kafkaScanSpec>;
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaScanSpec: java.lang.String getTopicName()>();
v = null;
label:
// --- start of try region (see catch clauses at the bottom) ---
// Build the consumer from the plugin's Kafka properties with byte[] deserializers.
v = new org.apache.kafka.clients.consumer.KafkaConsumer;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin>;
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaStoragePlugin: org.apache.drill.exec.store.kafka.KafkaStoragePluginConfig getConfig()>();
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaStoragePluginConfig: java.util.Properties getKafkaConsumerProps()>();
v = new org.apache.kafka.common.serialization.ByteArrayDeserializer;
specialinvoke v.<org.apache.kafka.common.serialization.ByteArrayDeserializer: void <init>()>();
v = new org.apache.kafka.common.serialization.ByteArrayDeserializer;
specialinvoke v.<org.apache.kafka.common.serialization.ByteArrayDeserializer: void <init>()>();
specialinvoke v.<org.apache.kafka.clients.consumer.KafkaConsumer: void <init>(java.util.Properties,org.apache.kafka.common.serialization.Deserializer,org.apache.kafka.common.serialization.Deserializer)>(v, v, v);
v = v;
// Topic must exist in the cluster, otherwise fail the query planning.
v = virtualinvoke v.<org.apache.kafka.clients.consumer.KafkaConsumer: java.util.Map listTopics()>();
v = interfaceinvoke v.<java.util.Map: boolean containsKey(java.lang.Object)>(v);
if v != 0 goto label;
v = staticinvoke <org.apache.drill.common.exceptions.UserException: org.apache.drill.common.exceptions.UserException$Builder dataReadError()>();
v = newarray (java.lang.Object)[1];
v[0] = v;
v = virtualinvoke v.<org.apache.drill.common.exceptions.UserException$Builder: org.apache.drill.common.exceptions.UserException$Builder message(java.lang.String,java.lang.Object[])>("Table \'%s\' does not exist", v);
v = <org.apache.drill.exec.store.kafka.KafkaGroupScan: org.slf4j.Logger logger>;
v = virtualinvoke v.<org.apache.drill.common.exceptions.UserException$Builder: org.apache.drill.common.exceptions.UserException build(org.slf4j.Logger)>(v);
throw v;
label:
// Subscribe to the single topic and poll once (5s) to trigger assignment.
v = staticinvoke <java.util.Collections: java.util.List singletonList(java.lang.Object)>(v);
virtualinvoke v.<org.apache.kafka.clients.consumer.KafkaConsumer: void subscribe(java.util.Collection)>(v);
v = staticinvoke <java.time.Duration: java.time.Duration ofSeconds(long)>(5L);
virtualinvoke v.<org.apache.kafka.clients.consumer.KafkaConsumer: org.apache.kafka.clients.consumer.ConsumerRecords poll(java.time.Duration)>(v);
v = specialinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Set waitForConsumerAssignment(org.apache.kafka.clients.consumer.Consumer)>(v);
v = virtualinvoke v.<org.apache.kafka.clients.consumer.KafkaConsumer: java.util.List partitionsFor(java.lang.String)>(v);
// Record start offsets: seek to beginning, then position() per partition.
virtualinvoke v.<org.apache.kafka.clients.consumer.KafkaConsumer: void seekToBeginning(java.util.Collection)>(v);
v = interfaceinvoke v.<java.util.Set: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = virtualinvoke v.<org.apache.kafka.clients.consumer.KafkaConsumer: long position(org.apache.kafka.common.TopicPartition)>(v);
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<java.util.Map: java.lang.Object put(java.lang.Object,java.lang.Object)>(v, v);
goto label;
label:
// Record end offsets: seek to end, then position() per partition.
virtualinvoke v.<org.apache.kafka.clients.consumer.KafkaConsumer: void seekToEnd(java.util.Collection)>(v);
v = interfaceinvoke v.<java.util.Set: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = virtualinvoke v.<org.apache.kafka.clients.consumer.KafkaConsumer: long position(org.apache.kafka.common.TopicPartition)>(v);
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<java.util.Map: java.lang.Object put(java.lang.Object,java.lang.Object)>(v, v);
goto label;
label:
// Normal-path "finally": hand the consumer to the plugin for later cleanup.
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin>;
virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaStoragePlugin: void registerToClose(java.lang.AutoCloseable)>(v);
goto label;
label:
// Exception handler: wrap any failure as a dataReadError UserException.
v := @caughtexception;
v = staticinvoke <org.apache.drill.common.exceptions.UserException: org.apache.drill.common.exceptions.UserException$Builder dataReadError(java.lang.Throwable)>(v);
v = newarray (java.lang.Object)[1];
v[0] = v;
v = virtualinvoke v.<org.apache.drill.common.exceptions.UserException$Builder: org.apache.drill.common.exceptions.UserException$Builder message(java.lang.String,java.lang.Object[])>("Failed to fetch start/end offsets of the topic %s", v);
v = virtualinvoke v.<java.lang.Exception: java.lang.String getMessage()>();
v = virtualinvoke v.<org.apache.drill.common.exceptions.UserException$Builder: org.apache.drill.common.exceptions.UserException$Builder addContext(java.lang.String)>(v);
v = <org.apache.drill.exec.store.kafka.KafkaGroupScan: org.slf4j.Logger logger>;
v = virtualinvoke v.<org.apache.drill.common.exceptions.UserException$Builder: org.apache.drill.common.exceptions.UserException build(org.slf4j.Logger)>(v);
throw v;
label:
// Throwable handler (compiled finally): register the consumer for close,
// then rethrow.
v := @caughtexception;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin>;
virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaStoragePlugin: void registerToClose(java.lang.AutoCloseable)>(v);
throw v;
label:
// Build one PartitionScanWork per PartitionInfo of the topic.
v = interfaceinvoke v.<java.util.List: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = new org.apache.kafka.common.TopicPartition;
v = virtualinvoke v.<org.apache.kafka.common.PartitionInfo: java.lang.String topic()>();
v = virtualinvoke v.<org.apache.kafka.common.PartitionInfo: int partition()>();
specialinvoke v.<org.apache.kafka.common.TopicPartition: void <init>(java.lang.String,int)>(v, v);
// Look up the start/end offsets recorded above for this partition.
v = interfaceinvoke v.<java.util.Map: java.lang.Object get(java.lang.Object)>(v);
v = virtualinvoke v.<java.lang.Long: long longValue()>();
v = interfaceinvoke v.<java.util.Map: java.lang.Object get(java.lang.Object)>(v);
v = virtualinvoke v.<java.lang.Long: long longValue()>();
v = <org.apache.drill.exec.store.kafka.KafkaGroupScan: org.slf4j.Logger logger>;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object,java.lang.Object)>("Latest offset of {} is {}", v, v);
v = <org.apache.drill.exec.store.kafka.KafkaGroupScan: org.slf4j.Logger logger>;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object,java.lang.Object)>("Last committed offset of {} is {}", v, v);
v = new org.apache.drill.exec.store.kafka.KafkaPartitionScanSpec;
v = virtualinvoke v.<org.apache.kafka.common.TopicPartition: java.lang.String topic()>();
v = virtualinvoke v.<org.apache.kafka.common.TopicPartition: int partition()>();
specialinvoke v.<org.apache.drill.exec.store.kafka.KafkaPartitionScanSpec: void <init>(java.lang.String,int,long,long)>(v, v, v, v);
v = new org.apache.drill.exec.store.kafka.KafkaGroupScan$PartitionScanWork;
v = new org.apache.drill.exec.store.schedule.EndpointByteMapImpl;
specialinvoke v.<org.apache.drill.exec.store.schedule.EndpointByteMapImpl: void <init>()>();
specialinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan$PartitionScanWork: void <init>(org.apache.drill.exec.store.schedule.EndpointByteMap,org.apache.drill.exec.store.kafka.KafkaPartitionScanSpec)>(v, v);
// Affinity: for each in-sync replica whose host is also a drillbit, add that
// endpoint with the work's byte count to the work's EndpointByteMap.
v = virtualinvoke v.<org.apache.kafka.common.PartitionInfo: org.apache.kafka.common.Node[] inSyncReplicas()>();
v = lengthof v;
v = 0;
label:
if v >= v goto label;
v = v[v];
v = virtualinvoke v.<org.apache.kafka.common.Node: java.lang.String host()>();
v = interfaceinvoke v.<java.util.Map: java.lang.Object get(java.lang.Object)>(v);
if v == null goto label;
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan$PartitionScanWork: org.apache.drill.exec.store.schedule.EndpointByteMap getByteMap()>();
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan$PartitionScanWork: long getTotalBytes()>();
interfaceinvoke v.<org.apache.drill.exec.store.schedule.EndpointByteMap: void add(org.apache.drill.exec.proto.CoordinationProtos$DrillbitEndpoint,long)>(v, v);
label:
v = v + 1;
goto label;
label:
// Store the finished work unit keyed by TopicPartition.
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap>;
interfaceinvoke v.<java.util.Map: java.lang.Object put(java.lang.Object,java.lang.Object)>(v, v);
goto label;
label:
return;
catch java.lang.Exception from label to label with label;
catch java.lang.Throwable from label to label with label;
}
// Polls consumer.assignment() until it is non-empty or the elapsed wait
// exceeds the "store.kafka.poll.timeout" system option, sleeping 500 ms
// between checks. Throws a dataReadError UserException if the timeout is
// reached with an empty assignment; otherwise returns the assigned
// TopicPartition set. InterruptedException from Thread.sleep propagates
// (declared on the method).
private java.util.Set waitForConsumerAssignment(org.apache.kafka.clients.consumer.Consumer) throws java.lang.InterruptedException
{
java.lang.Object[] v;
org.apache.drill.common.exceptions.UserException$Builder v, v;
org.apache.drill.common.exceptions.UserException v;
org.apache.drill.exec.server.options.SystemOptionManager v;
long v, v;
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
org.apache.drill.exec.server.DrillbitContext v;
byte v, v;
java.lang.Long v;
boolean v;
org.apache.kafka.clients.consumer.Consumer v;
org.slf4j.Logger v;
java.util.Set v;
org.apache.drill.exec.store.kafka.KafkaStoragePlugin v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: org.apache.kafka.clients.consumer.Consumer;
v = interfaceinvoke v.<org.apache.kafka.clients.consumer.Consumer: java.util.Set assignment()>();
// Timeout budget comes from the system option "store.kafka.poll.timeout".
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin>;
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaStoragePlugin: org.apache.drill.exec.server.DrillbitContext getContext()>();
v = virtualinvoke v.<org.apache.drill.exec.server.DrillbitContext: org.apache.drill.exec.server.options.SystemOptionManager getOptionManager()>();
v = virtualinvoke v.<org.apache.drill.exec.server.options.SystemOptionManager: long getLong(java.lang.String)>("store.kafka.poll.timeout");
// Elapsed wait time in milliseconds.
v = 0L;
label:
// Loop while assignment is empty and elapsed < timeout.
v = interfaceinvoke v.<java.util.Set: boolean isEmpty()>();
if v == 0 goto label;
v = v cmp v;
if v >= 0 goto label;
staticinvoke <java.lang.Thread: void sleep(long)>(500L);
v = v + 500L;
v = interfaceinvoke v.<org.apache.kafka.clients.consumer.Consumer: java.util.Set assignment()>();
goto label;
label:
// If we exhausted the timeout, fail; otherwise fall through to return.
v = v cmp v;
if v < 0 goto label;
v = staticinvoke <org.apache.drill.common.exceptions.UserException: org.apache.drill.common.exceptions.UserException$Builder dataReadError()>();
v = newarray (java.lang.Object)[1];
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
v[0] = v;
v = virtualinvoke v.<org.apache.drill.common.exceptions.UserException$Builder: org.apache.drill.common.exceptions.UserException$Builder message(java.lang.String,java.lang.Object[])>("Consumer assignment wasn\'t completed within the timeout %s", v);
v = <org.apache.drill.exec.store.kafka.KafkaGroupScan: org.slf4j.Logger logger>;
v = virtualinvoke v.<org.apache.drill.common.exceptions.UserException$Builder: org.apache.drill.common.exceptions.UserException build(org.slf4j.Logger)>(v);
throw v;
label:
return v;
}
// Distributes the PartitionScanWork units over the given endpoint list using
// Drill's AssignmentCreator and caches the resulting
// minor-fragment-id -> work multimap in `assignments`.
public void applyAssignments(java.util.List)
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
java.util.List v;
org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap v;
java.util.Collection v;
java.util.Map v;
java.util.ArrayList v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: java.util.List;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap>;
v = interfaceinvoke v.<java.util.Map: java.util.Collection values()>();
v = staticinvoke <org.apache.drill.shaded.guava.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = staticinvoke <org.apache.drill.exec.store.schedule.AssignmentCreator: org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap getMappings(java.util.List,java.util.List)>(v, v);
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap assignments> = v;
return;
}
// Limit pushdown: compares the requested limit against the current `records`
// field and, when the comparison passes, returns a copy of this scan with the
// new limit (via the (KafkaGroupScan, int) copy constructor); otherwise
// returns null, which tells the planner no new scan is needed.
// NOTE(review): with both operands named "v" in this IR, the direction of the
// "<=" comparison (which operand is the limit) cannot be confirmed here.
public org.apache.drill.exec.physical.base.GroupScan applyLimit(int)
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v, v;
int v, v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: int;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: int records>;
if v <= v goto label;
v = new org.apache.drill.exec.store.kafka.KafkaGroupScan;
specialinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: void <init>(org.apache.drill.exec.store.kafka.KafkaGroupScan,int)>(v, v);
return v;
label:
return null;
}
// Always true: this group scan supports LIMIT pushdown (see applyLimit).
public boolean supportsLimitPushdown()
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
return 1;
}
// Builds the KafkaSubScan for one minor fragment: looks up the fragment's
// work units in `assignments`, maps each to its KafkaPartitionScanSpec
// (getPartitionScanSpec method reference), and wraps the resulting list
// together with userName/plugin/columns/records into a KafkaSubScan.
public org.apache.drill.exec.store.kafka.KafkaSubScan getSpecificScan(int)
{
org.apache.drill.exec.store.kafka.KafkaSubScan v;
java.lang.Integer v;
java.util.function.Function v;
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
org.apache.drill.exec.store.kafka.KafkaStoragePlugin v;
java.util.List v, v;
java.util.stream.Stream v, v;
org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap v;
int v, v;
java.lang.Object v;
java.lang.String v;
java.util.stream.Collector v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: int;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap assignments>;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
v = interfaceinvoke v.<org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap: java.util.List get(java.lang.Object)>(v);
// PartitionScanWork -> KafkaPartitionScanSpec.
v = interfaceinvoke v.<java.util.List: java.util.stream.Stream stream()>();
v = staticinvoke <org.apache.drill.exec.store.kafka.KafkaGroupScan$getPartitionScanSpec__4: java.util.function.Function bootstrap$()>();
v = interfaceinvoke v.<java.util.stream.Stream: java.util.stream.Stream map(java.util.function.Function)>(v);
v = staticinvoke <java.util.stream.Collectors: java.util.stream.Collector toList()>();
v = interfaceinvoke v.<java.util.stream.Stream: java.lang.Object collect(java.util.stream.Collector)>(v);
v = new org.apache.drill.exec.store.kafka.KafkaSubScan;
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.lang.String getUserName()>();
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin>;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.List columns>;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: int records>;
specialinvoke v.<org.apache.drill.exec.store.kafka.KafkaSubScan: void <init>(java.lang.String,org.apache.drill.exec.store.kafka.KafkaStoragePlugin,java.util.List,int,java.util.List)>(v, v, v, v, v);
return v;
}
// Maximum parallelization width = number of work units (one per Kafka
// partition, see init()).
public int getMaxParallelizationWidth()
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
java.util.Collection v;
java.util.Map v;
int v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap>;
v = interfaceinvoke v.<java.util.Map: java.util.Collection values()>();
v = interfaceinvoke v.<java.util.Collection: int size()>();
return v;
}
// Scan statistics: total message count = sum over all partition work units of
// (endOffset - startOffset). Returns ScanStats(EXACT_ROW_COUNT, messageCount,
// 1.0, messageCount * 1024). The 1024L literal is presumably the MSG_SIZE
// field inlined by the compiler — TODO confirm.
public org.apache.drill.exec.physical.base.ScanStats getScanStats()
{
java.util.Iterator v;
org.apache.drill.exec.physical.base.ScanStats v;
java.util.Collection v;
long v, v, v, v, v;
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
org.apache.drill.exec.store.kafka.KafkaPartitionScanSpec v, v;
java.util.Map v;
java.lang.Object v;
org.apache.drill.exec.physical.base.ScanStats$GroupScanProperty v;
boolean v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
// Running total of messages across all partitions.
v = 0L;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap>;
v = interfaceinvoke v.<java.util.Map: java.util.Collection values()>();
v = interfaceinvoke v.<java.util.Collection: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan$PartitionScanWork: org.apache.drill.exec.store.kafka.KafkaPartitionScanSpec getPartitionScanSpec()>();
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaPartitionScanSpec: long getEndOffset()>();
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan$PartitionScanWork: org.apache.drill.exec.store.kafka.KafkaPartitionScanSpec getPartitionScanSpec()>();
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaPartitionScanSpec: long getStartOffset()>();
// messages in this partition = end - start.
v = v - v;
v = v + v;
goto label;
label:
v = new org.apache.drill.exec.physical.base.ScanStats;
v = <org.apache.drill.exec.physical.base.ScanStats$GroupScanProperty: org.apache.drill.exec.physical.base.ScanStats$GroupScanProperty EXACT_ROW_COUNT>;
// Estimated byte size: message count * 1024.
v = v * 1024L;
specialinvoke v.<org.apache.drill.exec.physical.base.ScanStats: void <init>(org.apache.drill.exec.physical.base.ScanStats$GroupScanProperty,double,double,double)>(v, v, 1.0, v);
return v;
}
// Plan digest: simply delegates to toString().
public java.lang.String getDigest()
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
java.lang.String v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.lang.String toString()>();
return v;
}
// Creates a copy of this scan via the copy constructor after asserting the
// children list is empty (a group scan is a leaf operator; Preconditions
// receives the isEmpty() result).
public org.apache.drill.exec.physical.base.PhysicalOperator getNewWithChildren(java.util.List)
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v, v;
java.util.List v;
boolean v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: java.util.List;
v = interfaceinvoke v.<java.util.List: boolean isEmpty()>();
staticinvoke <org.apache.drill.shaded.guava.com.google.common.base.Preconditions: void checkArgument(boolean)>(v);
v = new org.apache.drill.exec.store.kafka.KafkaGroupScan;
specialinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: void <init>(org.apache.drill.exec.store.kafka.KafkaGroupScan)>(v);
return v;
}
// Returns endpoint affinities for scheduling, computed lazily on first call:
// the cached 'affinities' field is populated from the PartitionScanWork values
// of partitionWorkMap via AffinityCreator.getAffinityMap, then reused.
// NOTE(review): lazy init is unsynchronized — presumably only called from a
// single planning thread; worst case is redundant recomputation. Confirm.
public java.util.List getOperatorAffinity()
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
java.util.List v, v, v;
java.util.Collection v;
java.util.Map v;
java.util.ArrayList v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.List affinities>;
// Skip computation when the cache is already populated.
if v != null goto label;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap>;
v = interfaceinvoke v.<java.util.Map: java.util.Collection values()>();
v = staticinvoke <org.apache.drill.shaded.guava.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = staticinvoke <org.apache.drill.exec.store.schedule.AffinityCreator: java.util.List getAffinityMap(java.util.List)>(v);
v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.List affinities> = v;
label:
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.List affinities>;
return v;
}
// Always reports that column projection can be pushed into this scan
// (returns 1, i.e. true, unconditionally; the columns argument is ignored).
public boolean canPushdownProjects(java.util.List)
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
java.util.List v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: java.util.List;
return 1;
}
// Returns a copy of this group scan with the given projected columns applied,
// using the (KafkaGroupScan, List) copy constructor; this scan is unchanged.
public org.apache.drill.exec.physical.base.GroupScan clone(java.util.List)
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v, v;
java.util.List v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: java.util.List;
v = new org.apache.drill.exec.store.kafka.KafkaGroupScan;
specialinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: void <init>(org.apache.drill.exec.store.kafka.KafkaGroupScan,java.util.List)>(v, v);
return v;
}
// Returns a copy of this group scan restricted to the given list of
// KafkaPartitionScanSpec entries. For each spec: builds its TopicPartition key,
// records it in a HashSet, and overwrites the clone's partitionWorkMap entry
// with a new PartitionScanWork that reuses the existing EndpointByteMap for
// that partition but carries the new (offset-adjusted) spec. Finally, any
// partitions NOT named in the input list are dropped from the clone's map via
// keySet().removeIf with a generated "not in set" predicate lambda.
// NOTE(review): partitionWorkMap.get() is assumed to contain every partition
// referenced by the incoming specs — a missing key would surface as an NPE in
// getByteMap(); confirm callers only pass known partitions.
public org.apache.drill.exec.physical.base.GroupScan cloneWithNewSpec(java.util.List)
{
org.apache.drill.exec.store.kafka.KafkaGroupScan$PartitionScanWork v;
java.util.HashSet v;
org.apache.drill.exec.store.kafka.KafkaGroupScan v, v;
java.util.Map v, v, v;
int v;
java.lang.String v;
org.apache.drill.exec.store.schedule.EndpointByteMap v;
boolean v;
org.apache.kafka.common.TopicPartition v;
java.util.Iterator v;
java.util.function.Predicate v;
java.util.Set v;
java.util.List v;
java.lang.Object v, v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v := @parameter: java.util.List;
// Start from a full copy of this scan; it will be narrowed below.
v = new org.apache.drill.exec.store.kafka.KafkaGroupScan;
specialinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: void <init>(org.apache.drill.exec.store.kafka.KafkaGroupScan)>(v);
// Set of TopicPartitions that the new specs cover (survivors).
v = staticinvoke <org.apache.drill.shaded.guava.com.google.common.collect.Sets: java.util.HashSet newHashSet()>();
v = interfaceinvoke v.<java.util.List: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
// Build the TopicPartition key for this spec and remember it as a survivor.
v = new org.apache.kafka.common.TopicPartition;
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaPartitionScanSpec: java.lang.String getTopicName()>();
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaPartitionScanSpec: int getPartitionId()>();
specialinvoke v.<org.apache.kafka.common.TopicPartition: void <init>(java.lang.String,int)>(v, v);
virtualinvoke v.<java.util.HashSet: boolean add(java.lang.Object)>(v);
// Replace the clone's work entry: keep the existing EndpointByteMap,
// pair it with the incoming spec.
v = new org.apache.drill.exec.store.kafka.KafkaGroupScan$PartitionScanWork;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap>;
v = interfaceinvoke v.<java.util.Map: java.lang.Object get(java.lang.Object)>(v);
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan$PartitionScanWork: org.apache.drill.exec.store.schedule.EndpointByteMap getByteMap()>();
specialinvoke v.<org.apache.drill.exec.store.kafka.KafkaGroupScan$PartitionScanWork: void <init>(org.apache.drill.exec.store.schedule.EndpointByteMap,org.apache.drill.exec.store.kafka.KafkaPartitionScanSpec)>(v, v);
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap>;
interfaceinvoke v.<java.util.Map: java.lang.Object put(java.lang.Object,java.lang.Object)>(v, v);
goto label;
label:
// Drop all partitions not covered by the new specs (removeIf over keySet
// with a compiler-generated lambda predicate closing over the HashSet).
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap>;
v = interfaceinvoke v.<java.util.Map: java.util.Set keySet()>();
v = staticinvoke <org.apache.drill.exec.store.kafka.KafkaGroupScan$lambda_cloneWithNewSpec_1__5: java.util.function.Predicate bootstrap$(java.util.HashSet)>(v);
interfaceinvoke v.<java.util.Set: boolean removeIf(java.util.function.Predicate)>(v);
return v;
}
// Returns the storage plugin configuration, obtained from the owning
// KafkaStoragePlugin instance via getConfig().
public org.apache.drill.exec.store.kafka.KafkaStoragePluginConfig getKafkaStoragePluginConfig()
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
org.apache.drill.exec.store.kafka.KafkaStoragePlugin v;
org.apache.drill.exec.store.kafka.KafkaStoragePluginConfig v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin>;
v = virtualinvoke v.<org.apache.drill.exec.store.kafka.KafkaStoragePlugin: org.apache.drill.exec.store.kafka.KafkaStoragePluginConfig getConfig()>();
return v;
}
// Simple accessor: returns the projected-columns list held by this scan
// (the internal list itself, not a defensive copy).
public java.util.List getColumns()
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
java.util.List v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.List columns>;
return v;
}
// Simple accessor: returns the 'records' field (record-count/limit value
// supplied at construction).
public int getRecords()
{
int v;
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: int records>;
return v;
}
// Simple accessor: returns the KafkaScanSpec describing what this scan reads.
public org.apache.drill.exec.store.kafka.KafkaScanSpec getKafkaScanSpec()
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
org.apache.drill.exec.store.kafka.KafkaScanSpec v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaScanSpec kafkaScanSpec>;
return v;
}
// Simple accessor: returns the owning KafkaStoragePlugin instance.
public org.apache.drill.exec.store.kafka.KafkaStoragePlugin getStoragePlugin()
{
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
org.apache.drill.exec.store.kafka.KafkaStoragePlugin v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaStoragePlugin kafkaStoragePlugin>;
return v;
}
// Renders this scan for plan output using PlanStringBuilder, including the
// scanSpec, projected columns, and record count. Also serves as the plan
// digest via getDigest().
public java.lang.String toString()
{
org.apache.drill.exec.store.kafka.KafkaScanSpec v;
org.apache.drill.common.PlanStringBuilder v, v, v, v;
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
java.util.List v;
int v;
java.lang.String v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
// Builder is created with an empty name prefix ("").
v = new org.apache.drill.common.PlanStringBuilder;
specialinvoke v.<org.apache.drill.common.PlanStringBuilder: void <init>(java.lang.String)>("");
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.apache.drill.exec.store.kafka.KafkaScanSpec kafkaScanSpec>;
v = virtualinvoke v.<org.apache.drill.common.PlanStringBuilder: org.apache.drill.common.PlanStringBuilder field(java.lang.String,java.lang.Object)>("scanSpec", v);
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.List columns>;
v = virtualinvoke v.<org.apache.drill.common.PlanStringBuilder: org.apache.drill.common.PlanStringBuilder field(java.lang.String,java.lang.Object)>("columns", v);
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: int records>;
v = virtualinvoke v.<org.apache.drill.common.PlanStringBuilder: org.apache.drill.common.PlanStringBuilder field(java.lang.String,int)>("records", v);
v = virtualinvoke v.<org.apache.drill.common.PlanStringBuilder: java.lang.String toString()>();
return v;
}
// Returns the list of KafkaPartitionScanSpec objects currently tracked:
// streams partitionWorkMap.values(), maps each PartitionScanWork through a
// compiler-generated lambda (presumably PartitionScanWork::getPartitionScanSpec
// — confirm against original source), and collects to a List.
public java.util.List getPartitionScanSpecList()
{
java.util.Collection v;
java.util.function.Function v;
org.apache.drill.exec.store.kafka.KafkaGroupScan v;
java.util.stream.Stream v, v;
java.util.Map v;
java.lang.Object v;
java.util.stream.Collector v;
v := @this: org.apache.drill.exec.store.kafka.KafkaGroupScan;
v = v.<org.apache.drill.exec.store.kafka.KafkaGroupScan: java.util.Map partitionWorkMap>;
v = interfaceinvoke v.<java.util.Map: java.util.Collection values()>();
v = interfaceinvoke v.<java.util.Collection: java.util.stream.Stream stream()>();
v = staticinvoke <org.apache.drill.exec.store.kafka.KafkaGroupScan$lambda_getPartitionScanSpecList_2__6: java.util.function.Function bootstrap$()>();
v = interfaceinvoke v.<java.util.stream.Stream: java.util.stream.Stream map(java.util.function.Function)>(v);
v = staticinvoke <java.util.stream.Collectors: java.util.stream.Collector toList()>();
v = interfaceinvoke v.<java.util.stream.Stream: java.lang.Object collect(java.util.stream.Collector)>(v);
return v;
}
// Static initializer: installs the SLF4J logger for this class. The declared
// MSG_SIZE constant is not assigned here — presumably a compile-time constant
// inlined at use sites (e.g. the 1024L multiplier in the stats method);
// confirm against the original source.
static void <clinit>()
{
org.slf4j.Logger v;
v = staticinvoke <org.slf4j.LoggerFactory: org.slf4j.Logger getLogger(java.lang.Class)>(class "Lorg/apache/drill/exec/store/kafka/KafkaGroupScan;");
<org.apache.drill.exec.store.kafka.KafkaGroupScan: org.slf4j.Logger logger> = v;
return;
}
}