public class org.apache.parquet.hadoop.ColumnChunkIncReadStore extends java.lang.Object implements org.apache.parquet.column.page.PageReadStore
{
private static final org.slf4j.Logger logger;
private static org.apache.parquet.format.converter.ParquetMetadataConverter METADATA_CONVERTER;
private org.apache.parquet.compression.CompressionCodecFactory codecFactory;
private oadd.org.apache.drill.exec.memory.BufferAllocator allocator;
private oadd.org.apache.hadoop.fs.FileSystem fs;
private oadd.org.apache.hadoop.fs.Path path;
private long rowCount;
private java.util.List streams;
private java.util.Map columns;
public void <init>(long, org.apache.parquet.compression.CompressionCodecFactory, oadd.org.apache.drill.exec.memory.BufferAllocator, oadd.org.apache.hadoop.fs.FileSystem, oadd.org.apache.hadoop.fs.Path)
{
org.apache.parquet.hadoop.ColumnChunkIncReadStore this;
long rowCount;
org.apache.parquet.compression.CompressionCodecFactory codecFactory;
oadd.org.apache.drill.exec.memory.BufferAllocator allocator;
oadd.org.apache.hadoop.fs.FileSystem fs;
oadd.org.apache.hadoop.fs.Path path;
java.util.ArrayList $streams;
java.util.HashMap $columns;
this := @this: org.apache.parquet.hadoop.ColumnChunkIncReadStore;
rowCount := @parameter0: long;
codecFactory := @parameter1: org.apache.parquet.compression.CompressionCodecFactory;
allocator := @parameter2: oadd.org.apache.drill.exec.memory.BufferAllocator;
fs := @parameter3: oadd.org.apache.hadoop.fs.FileSystem;
path := @parameter4: oadd.org.apache.hadoop.fs.Path;
specialinvoke this.<java.lang.Object: void <init>()>();
$streams = new java.util.ArrayList;
specialinvoke $streams.<java.util.ArrayList: void <init>()>();
this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: java.util.List streams> = $streams;
$columns = new java.util.HashMap;
specialinvoke $columns.<java.util.HashMap: void <init>()>();
this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: java.util.Map columns> = $columns;
this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: org.apache.parquet.compression.CompressionCodecFactory codecFactory> = codecFactory;
this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: oadd.org.apache.drill.exec.memory.BufferAllocator allocator> = allocator;
this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: oadd.org.apache.hadoop.fs.FileSystem fs> = fs;
this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: oadd.org.apache.hadoop.fs.Path path> = path;
this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: long rowCount> = rowCount;
return;
}
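// Source-level Java sketch of this constructor, reconstructed from the Jimple
// above (parameter names are assumptions inferred from the field assignments,
// not recovered from the bytecode):
//
//   public ColumnChunkIncReadStore(long rowCount, CompressionCodecFactory codecFactory,
//                                  BufferAllocator allocator, FileSystem fs, Path path) {
//     this.streams = new ArrayList<>();
//     this.columns = new HashMap<>();
//     this.codecFactory = codecFactory;
//     this.allocator = allocator;
//     this.fs = fs;
//     this.path = path;
//     this.rowCount = rowCount;
//   }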
public void addColumn(org.apache.parquet.column.ColumnDescriptor, org.apache.parquet.hadoop.metadata.ColumnChunkMetaData) throws java.io.IOException
{
org.apache.parquet.hadoop.ColumnChunkIncReadStore this;
org.apache.parquet.column.ColumnDescriptor descriptor;
org.apache.parquet.hadoop.metadata.ColumnChunkMetaData metaData;
oadd.org.apache.hadoop.fs.FileSystem $fs;
oadd.org.apache.hadoop.fs.Path $path;
oadd.org.apache.hadoop.fs.FSDataInputStream in;
java.util.List $streams;
long startPos;
org.apache.parquet.hadoop.ColumnChunkIncReadStore$ColumnChunkIncPageReader reader;
java.util.Map $columns;
this := @this: org.apache.parquet.hadoop.ColumnChunkIncReadStore;
descriptor := @parameter0: org.apache.parquet.column.ColumnDescriptor;
metaData := @parameter1: org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
$fs = this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: oadd.org.apache.hadoop.fs.FileSystem fs>;
$path = this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: oadd.org.apache.hadoop.fs.Path path>;
in = virtualinvoke $fs.<oadd.org.apache.hadoop.fs.FileSystem: oadd.org.apache.hadoop.fs.FSDataInputStream open(oadd.org.apache.hadoop.fs.Path)>($path);
$streams = this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: java.util.List streams>;
interfaceinvoke $streams.<java.util.List: boolean add(java.lang.Object)>(in);
startPos = virtualinvoke metaData.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: long getStartingPos()>();
virtualinvoke in.<oadd.org.apache.hadoop.fs.FSDataInputStream: void seek(long)>(startPos);
reader = new org.apache.parquet.hadoop.ColumnChunkIncReadStore$ColumnChunkIncPageReader;
specialinvoke reader.<org.apache.parquet.hadoop.ColumnChunkIncReadStore$ColumnChunkIncPageReader: void <init>(org.apache.parquet.hadoop.ColumnChunkIncReadStore,org.apache.parquet.hadoop.metadata.ColumnChunkMetaData,org.apache.parquet.column.ColumnDescriptor,oadd.org.apache.hadoop.fs.FSDataInputStream)>(this, metaData, descriptor, in);
$columns = this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: java.util.Map columns>;
interfaceinvoke $columns.<java.util.Map: java.lang.Object put(java.lang.Object,java.lang.Object)>(descriptor, reader);
return;
}
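// Source-level sketch of addColumn (a reconstruction, not the verified original
// source; local names are assumptions). The method opens a fresh stream per
// column chunk, remembers it so close() can release it, seeks to the chunk's
// first page, and registers an incremental page reader for the column. The
// first constructor argument in the Jimple is the outer-instance reference, so
// ColumnChunkIncPageReader is a non-static inner class:
//
//   public void addColumn(ColumnDescriptor descriptor, ColumnChunkMetaData metaData)
//       throws IOException {
//     FSDataInputStream in = fs.open(path);
//     streams.add(in);
//     in.seek(metaData.getStartingPos());
//     columns.put(descriptor, new ColumnChunkIncPageReader(metaData, descriptor, in));
//   }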
public void close() throws java.io.IOException
{
org.apache.parquet.hadoop.ColumnChunkIncReadStore this;
java.util.List $streams;
java.util.Map $columns;
java.util.Collection $readers;
java.util.Iterator streamIt, readerIt;
java.lang.Object $next1, $next2;
oadd.org.apache.hadoop.fs.FSDataInputStream stream;
org.apache.parquet.hadoop.ColumnChunkIncReadStore$ColumnChunkIncPageReader reader;
boolean $hasNext1, $hasNext2;
this := @this: org.apache.parquet.hadoop.ColumnChunkIncReadStore;
$streams = this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: java.util.List streams>;
streamIt = interfaceinvoke $streams.<java.util.List: java.util.Iterator iterator()>();
label1:
$hasNext1 = interfaceinvoke streamIt.<java.util.Iterator: boolean hasNext()>();
if $hasNext1 == 0 goto label2;
$next1 = interfaceinvoke streamIt.<java.util.Iterator: java.lang.Object next()>();
stream = (oadd.org.apache.hadoop.fs.FSDataInputStream) $next1;
virtualinvoke stream.<oadd.org.apache.hadoop.fs.FSDataInputStream: void close()>();
goto label1;
label2:
$columns = this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: java.util.Map columns>;
$readers = interfaceinvoke $columns.<java.util.Map: java.util.Collection values()>();
readerIt = interfaceinvoke $readers.<java.util.Collection: java.util.Iterator iterator()>();
label3:
$hasNext2 = interfaceinvoke readerIt.<java.util.Iterator: boolean hasNext()>();
if $hasNext2 == 0 goto label4;
$next2 = interfaceinvoke readerIt.<java.util.Iterator: java.lang.Object next()>();
reader = (org.apache.parquet.hadoop.ColumnChunkIncReadStore$ColumnChunkIncPageReader) $next2;
virtualinvoke reader.<org.apache.parquet.hadoop.ColumnChunkIncReadStore$ColumnChunkIncPageReader: void close()>();
goto label3;
label4:
return;
}
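// Source-level sketch of close() (a reconstruction; names assumed): every
// stream opened by addColumn is closed first, then each page reader is closed,
// presumably releasing whatever buffers it holds (the inner class is not shown
// in this dump):
//
//   public void close() throws IOException {
//     for (FSDataInputStream stream : streams) {
//       stream.close();
//     }
//     for (ColumnChunkIncPageReader reader : columns.values()) {
//       reader.close();
//     }
//   }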
public org.apache.parquet.column.page.PageReader getPageReader(org.apache.parquet.column.ColumnDescriptor)
{
org.apache.parquet.hadoop.ColumnChunkIncReadStore this;
org.apache.parquet.column.ColumnDescriptor descriptor;
java.util.Map $columns;
java.lang.Object $value;
org.apache.parquet.column.page.PageReader reader;
this := @this: org.apache.parquet.hadoop.ColumnChunkIncReadStore;
descriptor := @parameter0: org.apache.parquet.column.ColumnDescriptor;
$columns = this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: java.util.Map columns>;
$value = interfaceinvoke $columns.<java.util.Map: java.lang.Object get(java.lang.Object)>(descriptor);
reader = (org.apache.parquet.column.page.PageReader) $value;
return reader;
}
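// Source-level sketch (a reconstruction): a plain map lookup. The raw
// java.util.Map in the dump is likely just erasure; the source field was
// plausibly declared as Map<ColumnDescriptor, ColumnChunkIncPageReader>,
// which would make this compile without an explicit cast:
//
//   public PageReader getPageReader(ColumnDescriptor descriptor) {
//     return columns.get(descriptor);
//   }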
public long getRowCount()
{
org.apache.parquet.hadoop.ColumnChunkIncReadStore this;
long $rowCount;
this := @this: org.apache.parquet.hadoop.ColumnChunkIncReadStore;
$rowCount = this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: long rowCount>;
return $rowCount;
}
public java.lang.String toString()
{
org.apache.parquet.hadoop.ColumnChunkIncReadStore this;
oadd.org.apache.hadoop.fs.Path $path;
java.net.URI uri;
java.lang.String result;
this := @this: org.apache.parquet.hadoop.ColumnChunkIncReadStore;
$path = this.<org.apache.parquet.hadoop.ColumnChunkIncReadStore: oadd.org.apache.hadoop.fs.Path path>;
uri = virtualinvoke $path.<oadd.org.apache.hadoop.fs.Path: java.net.URI toUri()>();
result = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.net.URI)>(uri) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("ColumnChunkIncReadStore[File=\u0001]");
return result;
}
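// The dynamicinvoke above is JEP 280 invokedynamic string concatenation; the
// \u0001 in the recipe marks the single argument slot. Source-level equivalent:
//
//   @Override
//   public String toString() {
//     return "ColumnChunkIncReadStore[File=" + path.toUri() + "]";
//   }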
static void <clinit>()
{
org.slf4j.Logger $logger;
org.apache.parquet.format.converter.ParquetMetadataConverter $converter;
$logger = staticinvoke <org.slf4j.LoggerFactory: org.slf4j.Logger getLogger(java.lang.Class)>(class "Lorg/apache/parquet/hadoop/ColumnChunkIncReadStore;");
<org.apache.parquet.hadoop.ColumnChunkIncReadStore: org.slf4j.Logger logger> = $logger;
$converter = new org.apache.parquet.format.converter.ParquetMetadataConverter;
specialinvoke $converter.<org.apache.parquet.format.converter.ParquetMetadataConverter: void <init>()>();
<org.apache.parquet.hadoop.ColumnChunkIncReadStore: org.apache.parquet.format.converter.ParquetMetadataConverter METADATA_CONVERTER> = $converter;
return;
}
}
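// Hypothetical usage sketch (driver code assumed, not part of this class): the
// Parquet footer is read beforehand to obtain the row count and per-column
// chunk metadata; schema, blockMetaData, and the columnChunkMetaData(...)
// helper below are placeholders for that surrounding machinery.
//
//   ColumnChunkIncReadStore store = new ColumnChunkIncReadStore(
//       blockMetaData.getRowCount(), codecFactory, allocator, fs, path);
//   try {
//     for (ColumnDescriptor descriptor : schema.getColumns()) {
//       store.addColumn(descriptor, columnChunkMetaData(blockMetaData, descriptor));
//       PageReader pages = store.getPageReader(descriptor);
//       // ... drive a column reader over the pages for this column ...
//     }
//   } finally {
//     store.close(); // closes every stream opened by addColumn
//   }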