public class org.apache.parquet.hadoop.ParquetFileWriter extends java.lang.Object
{
private static final org.slf4j.Logger LOG;
private final org.apache.parquet.format.converter.ParquetMetadataConverter metadataConverter;
public static final java.lang.String PARQUET_METADATA_FILE;
public static final java.lang.String MAGIC_STR;
public static final byte[] MAGIC;
public static final java.lang.String EF_MAGIC_STR;
public static final byte[] EFMAGIC;
public static final java.lang.String PARQUET_COMMON_METADATA_FILE;
public static final int CURRENT_VERSION;
protected final org.apache.parquet.io.PositionOutputStream out;
private final org.apache.parquet.schema.MessageType schema;
private final org.apache.parquet.hadoop.ParquetFileWriter$AlignmentStrategy alignment;
private final int columnIndexTruncateLength;
private java.util.List blocks;
private final java.util.List columnIndexes;
private final java.util.List offsetIndexes;
private final java.util.List bloomFilters;
private final org.apache.parquet.crypto.InternalFileEncryptor fileEncryptor;
private org.apache.parquet.hadoop.metadata.BlockMetaData currentBlock;
private java.util.List currentColumnIndexes;
private java.util.List currentOffsetIndexes;
private java.util.Map currentBloomFilters;
private long currentRecordCount;
private org.apache.parquet.column.EncodingStats$Builder encodingStatsBuilder;
private java.util.Set currentEncodings;
private long uncompressedLength;
private long compressedLength;
private org.apache.parquet.column.statistics.Statistics currentStatistics;
private org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder columnIndexBuilder;
private org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder offsetIndexBuilder;
private org.apache.parquet.hadoop.metadata.CompressionCodecName currentChunkCodec;
private org.apache.parquet.hadoop.metadata.ColumnPath currentChunkPath;
private org.apache.parquet.schema.PrimitiveType currentChunkType;
private long currentChunkValueCount;
private long currentChunkFirstDataPage;
private long currentChunkDictionaryPageOffset;
private org.apache.parquet.hadoop.metadata.ParquetMetadata footer;
private final java.util.zip.CRC32 crc;
private boolean pageWriteChecksumEnabled;
private org.apache.parquet.hadoop.ParquetFileWriter$STATE state;
private static final java.lang.ThreadLocal COPY_BUFFER;
public void <init>(oadd.org.apache.hadoop.conf.Configuration, org.apache.parquet.schema.MessageType, oadd.org.apache.hadoop.fs.Path) throws java.io.IOException
{
org.apache.parquet.schema.MessageType v;
org.apache.parquet.hadoop.ParquetFileWriter$Mode v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.hadoop.util.HadoopOutputFile v;
oadd.org.apache.hadoop.conf.Configuration v;
oadd.org.apache.hadoop.fs.Path v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: oadd.org.apache.hadoop.conf.Configuration;
v := @parameter: org.apache.parquet.schema.MessageType;
v := @parameter: oadd.org.apache.hadoop.fs.Path;
v = staticinvoke <org.apache.parquet.hadoop.util.HadoopOutputFile: org.apache.parquet.hadoop.util.HadoopOutputFile fromPath(oadd.org.apache.hadoop.fs.Path,oadd.org.apache.hadoop.conf.Configuration)>(v, v);
v = <org.apache.parquet.hadoop.ParquetFileWriter$Mode: org.apache.parquet.hadoop.ParquetFileWriter$Mode CREATE>;
specialinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void <init>(org.apache.parquet.io.OutputFile,org.apache.parquet.schema.MessageType,org.apache.parquet.hadoop.ParquetFileWriter$Mode,long,int)>(v, v, v, 134217728L, 8388608);
return;
}
public void <init>(oadd.org.apache.hadoop.conf.Configuration, org.apache.parquet.schema.MessageType, oadd.org.apache.hadoop.fs.Path, org.apache.parquet.hadoop.ParquetFileWriter$Mode) throws java.io.IOException
{
org.apache.parquet.schema.MessageType v;
org.apache.parquet.hadoop.ParquetFileWriter$Mode v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.hadoop.util.HadoopOutputFile v;
oadd.org.apache.hadoop.conf.Configuration v;
oadd.org.apache.hadoop.fs.Path v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: oadd.org.apache.hadoop.conf.Configuration;
v := @parameter: org.apache.parquet.schema.MessageType;
v := @parameter: oadd.org.apache.hadoop.fs.Path;
v := @parameter: org.apache.parquet.hadoop.ParquetFileWriter$Mode;
v = staticinvoke <org.apache.parquet.hadoop.util.HadoopOutputFile: org.apache.parquet.hadoop.util.HadoopOutputFile fromPath(oadd.org.apache.hadoop.fs.Path,oadd.org.apache.hadoop.conf.Configuration)>(v, v);
specialinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void <init>(org.apache.parquet.io.OutputFile,org.apache.parquet.schema.MessageType,org.apache.parquet.hadoop.ParquetFileWriter$Mode,long,int)>(v, v, v, 134217728L, 8388608);
return;
}
public void <init>(oadd.org.apache.hadoop.conf.Configuration, org.apache.parquet.schema.MessageType, oadd.org.apache.hadoop.fs.Path, org.apache.parquet.hadoop.ParquetFileWriter$Mode, long, int) throws java.io.IOException
{
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.hadoop.util.HadoopOutputFile v;
long v;
org.apache.parquet.schema.MessageType v;
org.apache.parquet.hadoop.ParquetFileWriter$Mode v;
int v;
oadd.org.apache.hadoop.conf.Configuration v;
oadd.org.apache.hadoop.fs.Path v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: oadd.org.apache.hadoop.conf.Configuration;
v := @parameter: org.apache.parquet.schema.MessageType;
v := @parameter: oadd.org.apache.hadoop.fs.Path;
v := @parameter: org.apache.parquet.hadoop.ParquetFileWriter$Mode;
v := @parameter: long;
v := @parameter: int;
v = staticinvoke <org.apache.parquet.hadoop.util.HadoopOutputFile: org.apache.parquet.hadoop.util.HadoopOutputFile fromPath(oadd.org.apache.hadoop.fs.Path,oadd.org.apache.hadoop.conf.Configuration)>(v, v);
specialinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void <init>(org.apache.parquet.io.OutputFile,org.apache.parquet.schema.MessageType,org.apache.parquet.hadoop.ParquetFileWriter$Mode,long,int)>(v, v, v, v, v);
return;
}
public void <init>(org.apache.parquet.io.OutputFile, org.apache.parquet.schema.MessageType, org.apache.parquet.hadoop.ParquetFileWriter$Mode, long, int) throws java.io.IOException
{
org.apache.parquet.schema.MessageType v;
org.apache.parquet.io.OutputFile v;
org.apache.parquet.hadoop.ParquetFileWriter$Mode v;
org.apache.parquet.hadoop.ParquetFileWriter v;
int v;
long v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: org.apache.parquet.io.OutputFile;
v := @parameter: org.apache.parquet.schema.MessageType;
v := @parameter: org.apache.parquet.hadoop.ParquetFileWriter$Mode;
v := @parameter: long;
v := @parameter: int;
specialinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void <init>(org.apache.parquet.io.OutputFile,org.apache.parquet.schema.MessageType,org.apache.parquet.hadoop.ParquetFileWriter$Mode,long,int,int,int,boolean)>(v, v, v, v, v, 64, 2147483647, 1);
return;
}
public void <init>(org.apache.parquet.io.OutputFile, org.apache.parquet.schema.MessageType, org.apache.parquet.hadoop.ParquetFileWriter$Mode, long, int, int, int, boolean) throws java.io.IOException
{
org.apache.parquet.hadoop.ParquetFileWriter v;
long v;
org.apache.parquet.schema.MessageType v;
org.apache.parquet.io.OutputFile v;
org.apache.parquet.hadoop.ParquetFileWriter$Mode v;
int v, v, v;
boolean v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: org.apache.parquet.io.OutputFile;
v := @parameter: org.apache.parquet.schema.MessageType;
v := @parameter: org.apache.parquet.hadoop.ParquetFileWriter$Mode;
v := @parameter: long;
v := @parameter: int;
v := @parameter: int;
v := @parameter: int;
v := @parameter: boolean;
specialinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void <init>(org.apache.parquet.io.OutputFile,org.apache.parquet.schema.MessageType,org.apache.parquet.hadoop.ParquetFileWriter$Mode,long,int,int,int,boolean,org.apache.parquet.crypto.FileEncryptionProperties)>(v, v, v, v, v, v, v, v, null);
return;
}
public void <init>(org.apache.parquet.io.OutputFile, org.apache.parquet.schema.MessageType, org.apache.parquet.hadoop.ParquetFileWriter$Mode, long, int, int, int, boolean, org.apache.parquet.crypto.FileEncryptionProperties) throws java.io.IOException
{
org.apache.parquet.format.converter.ParquetMetadataConverter v;
java.util.Map v;
boolean v, v, v, v;
org.apache.parquet.crypto.FileEncryptionProperties v;
java.util.Set v;
org.apache.parquet.hadoop.ParquetFileWriter$STATE v;
org.apache.parquet.schema.MessageType v;
org.apache.parquet.io.OutputFile v;
org.apache.parquet.hadoop.ParquetFileWriter$Mode v, v;
org.apache.parquet.hadoop.ParquetFileWriter$PaddingAlignment v;
org.apache.parquet.crypto.ParquetCryptoRuntimeException v;
java.lang.String[] v;
long v, v, v;
java.util.ArrayList v, v, v, v;
org.apache.parquet.crypto.InternalFileEncryptor v;
int v, v, v;
java.lang.String v, v;
org.apache.parquet.column.EncodingStats$Builder v;
org.apache.parquet.hadoop.ParquetFileWriter$NoAlignment v;
java.util.Iterator v;
org.apache.parquet.hadoop.ParquetFileWriter v;
java.util.zip.CRC32 v, v;
java.lang.Object v, v;
org.apache.parquet.io.PositionOutputStream v, v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: org.apache.parquet.io.OutputFile;
v := @parameter: org.apache.parquet.schema.MessageType;
v := @parameter: org.apache.parquet.hadoop.ParquetFileWriter$Mode;
v := @parameter: long;
v := @parameter: int;
v := @parameter: int;
v := @parameter: int;
v := @parameter: boolean;
v := @parameter: org.apache.parquet.crypto.FileEncryptionProperties;
specialinvoke v.<java.lang.Object: void <init>()>();
v = new java.util.ArrayList;
specialinvoke v.<java.util.ArrayList: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List blocks> = v;
v = new java.util.ArrayList;
specialinvoke v.<java.util.ArrayList: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List columnIndexes> = v;
v = new java.util.ArrayList;
specialinvoke v.<java.util.ArrayList: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List offsetIndexes> = v;
v = new java.util.ArrayList;
specialinvoke v.<java.util.ArrayList: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List bloomFilters> = v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.ParquetMetadata footer> = null;
v = <org.apache.parquet.hadoop.ParquetFileWriter$STATE: org.apache.parquet.hadoop.ParquetFileWriter$STATE NOT_STARTED>;
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state> = v;
staticinvoke <org.apache.parquet.schema.TypeUtil: void checkValidWriteSchema(org.apache.parquet.schema.GroupType)>(v);
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.schema.MessageType schema> = v;
v = v;
v = interfaceinvoke v.<org.apache.parquet.io.OutputFile: boolean supportsBlockSize()>();
if v == 0 goto label;
v = interfaceinvoke v.<org.apache.parquet.io.OutputFile: long defaultBlockSize()>();
v = staticinvoke <java.lang.Math: long max(long,long)>(v, v);
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter$PaddingAlignment: org.apache.parquet.hadoop.ParquetFileWriter$PaddingAlignment get(long,long,int)>(v, v, v);
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$AlignmentStrategy alignment> = v;
goto label;
label:
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter$NoAlignment: org.apache.parquet.hadoop.ParquetFileWriter$NoAlignment get(long)>(v);
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$AlignmentStrategy alignment> = v;
label:
v = <org.apache.parquet.hadoop.ParquetFileWriter$Mode: org.apache.parquet.hadoop.ParquetFileWriter$Mode OVERWRITE>;
if v != v goto label;
v = interfaceinvoke v.<org.apache.parquet.io.OutputFile: org.apache.parquet.io.PositionOutputStream createOrOverwrite(long)>(v);
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out> = v;
goto label;
label:
v = interfaceinvoke v.<org.apache.parquet.io.OutputFile: org.apache.parquet.io.PositionOutputStream create(long)>(v);
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out> = v;
label:
v = new org.apache.parquet.column.EncodingStats$Builder;
specialinvoke v.<org.apache.parquet.column.EncodingStats$Builder: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.EncodingStats$Builder encodingStatsBuilder> = v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: int columnIndexTruncateLength> = v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: boolean pageWriteChecksumEnabled> = v;
if v == 0 goto label;
v = new java.util.zip.CRC32;
v = v;
specialinvoke v.<java.util.zip.CRC32: void <init>()>();
goto label;
label:
v = null;
label:
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.zip.CRC32 crc> = v;
v = new org.apache.parquet.format.converter.ParquetMetadataConverter;
specialinvoke v.<org.apache.parquet.format.converter.ParquetMetadataConverter: void <init>(int)>(v);
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.format.converter.ParquetMetadataConverter metadataConverter> = v;
if null != v goto label;
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.crypto.InternalFileEncryptor fileEncryptor> = null;
goto label;
label:
v = virtualinvoke v.<org.apache.parquet.crypto.FileEncryptionProperties: java.util.Map getEncryptedColumns()>();
if null == v goto label;
v = interfaceinvoke v.<java.util.Map: java.util.Set entrySet()>();
v = interfaceinvoke v.<java.util.Set: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = interfaceinvoke v.<java.util.Map$Entry: java.lang.Object getKey()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnPath: java.lang.String[] toArray()>();
v = virtualinvoke v.<org.apache.parquet.schema.MessageType: boolean containsPath(java.lang.String[])>(v);
if v != 0 goto label;
v = new org.apache.parquet.crypto.ParquetCryptoRuntimeException;
v = staticinvoke <java.util.Arrays: java.lang.String toString(java.lang.Object[])>(v);
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String)>(v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("Encrypted column \u not in file schema");
specialinvoke v.<org.apache.parquet.crypto.ParquetCryptoRuntimeException: void <init>(java.lang.String)>(v);
throw v;
label:
v = new org.apache.parquet.crypto.InternalFileEncryptor;
specialinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: void <init>(org.apache.parquet.crypto.FileEncryptionProperties)>(v);
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.crypto.InternalFileEncryptor fileEncryptor> = v;
label:
return;
}
void <init>(oadd.org.apache.hadoop.conf.Configuration, org.apache.parquet.schema.MessageType, oadd.org.apache.hadoop.fs.Path, long, int) throws java.io.IOException
{
long v;
org.apache.parquet.format.converter.ParquetMetadataConverter v;
java.util.ArrayList v, v, v, v;
short v;
int v;
oadd.org.apache.hadoop.conf.Configuration v;
org.apache.parquet.column.EncodingStats$Builder v;
boolean v, v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.hadoop.ParquetFileWriter$STATE v;
org.apache.parquet.schema.MessageType v;
oadd.org.apache.hadoop.fs.FSDataOutputStream v;
org.apache.parquet.hadoop.ParquetFileWriter$PaddingAlignment v;
java.util.zip.CRC32 v;
oadd.org.apache.hadoop.fs.Path v;
oadd.org.apache.hadoop.fs.FileSystem v;
org.apache.parquet.io.PositionOutputStream v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: oadd.org.apache.hadoop.conf.Configuration;
v := @parameter: org.apache.parquet.schema.MessageType;
v := @parameter: oadd.org.apache.hadoop.fs.Path;
v := @parameter: long;
v := @parameter: int;
specialinvoke v.<java.lang.Object: void <init>()>();
v = new java.util.ArrayList;
specialinvoke v.<java.util.ArrayList: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List blocks> = v;
v = new java.util.ArrayList;
specialinvoke v.<java.util.ArrayList: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List columnIndexes> = v;
v = new java.util.ArrayList;
specialinvoke v.<java.util.ArrayList: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List offsetIndexes> = v;
v = new java.util.ArrayList;
specialinvoke v.<java.util.ArrayList: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List bloomFilters> = v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.ParquetMetadata footer> = null;
v = <org.apache.parquet.hadoop.ParquetFileWriter$STATE: org.apache.parquet.hadoop.ParquetFileWriter$STATE NOT_STARTED>;
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state> = v;
v = virtualinvoke v.<oadd.org.apache.hadoop.fs.Path: oadd.org.apache.hadoop.fs.FileSystem getFileSystem(oadd.org.apache.hadoop.conf.Configuration)>(v);
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.schema.MessageType schema> = v;
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter$PaddingAlignment: org.apache.parquet.hadoop.ParquetFileWriter$PaddingAlignment get(long,long,int)>(v, v, v);
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$AlignmentStrategy alignment> = v;
v = virtualinvoke v.<oadd.org.apache.hadoop.fs.FileSystem: short getDefaultReplication(oadd.org.apache.hadoop.fs.Path)>(v);
v = virtualinvoke v.<oadd.org.apache.hadoop.fs.FileSystem: oadd.org.apache.hadoop.fs.FSDataOutputStream create(oadd.org.apache.hadoop.fs.Path,boolean,int,short,long)>(v, 1, 8192, v, v);
v = staticinvoke <org.apache.parquet.hadoop.util.HadoopStreams: org.apache.parquet.io.PositionOutputStream wrap(oadd.org.apache.hadoop.fs.FSDataOutputStream)>(v);
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out> = v;
v = new org.apache.parquet.column.EncodingStats$Builder;
specialinvoke v.<org.apache.parquet.column.EncodingStats$Builder: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.EncodingStats$Builder encodingStatsBuilder> = v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: int columnIndexTruncateLength> = 2147483647;
v = staticinvoke <org.apache.parquet.hadoop.ParquetOutputFormat: boolean getPageWriteChecksumEnabled(oadd.org.apache.hadoop.conf.Configuration)>(v);
v.<org.apache.parquet.hadoop.ParquetFileWriter: boolean pageWriteChecksumEnabled> = v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: boolean pageWriteChecksumEnabled>;
if v == 0 goto label;
v = new java.util.zip.CRC32;
specialinvoke v.<java.util.zip.CRC32: void <init>()>();
goto label;
label:
v = null;
label:
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.zip.CRC32 crc> = v;
v = new org.apache.parquet.format.converter.ParquetMetadataConverter;
specialinvoke v.<org.apache.parquet.format.converter.ParquetMetadataConverter: void <init>(int)>(2147483647);
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.format.converter.ParquetMetadataConverter metadataConverter> = v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.crypto.InternalFileEncryptor fileEncryptor> = null;
return;
}
public void start() throws java.io.IOException
{
byte[] v;
org.slf4j.Logger v;
org.apache.parquet.hadoop.ParquetFileWriter v;
long v;
org.apache.parquet.crypto.InternalFileEncryptor v, v;
org.apache.parquet.hadoop.ParquetFileWriter$STATE v, v;
java.lang.Long v;
org.apache.parquet.io.PositionOutputStream v, v;
boolean v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state>;
v = virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter$STATE: org.apache.parquet.hadoop.ParquetFileWriter$STATE start()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state> = v;
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object)>("{}: start", v);
v = <org.apache.parquet.hadoop.ParquetFileWriter: byte[] MAGIC>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.crypto.InternalFileEncryptor fileEncryptor>;
if null == v goto label;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.crypto.InternalFileEncryptor fileEncryptor>;
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: boolean isFooterEncrypted()>();
if v == 0 goto label;
v = <org.apache.parquet.hadoop.ParquetFileWriter: byte[] EFMAGIC>;
label:
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: void write(byte[])>(v);
return;
}
org.apache.parquet.crypto.InternalFileEncryptor getEncryptor()
{
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.crypto.InternalFileEncryptor v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.crypto.InternalFileEncryptor fileEncryptor>;
return v;
}
public void startBlock(long) throws java.io.IOException
{
org.slf4j.Logger v;
org.apache.parquet.hadoop.ParquetFileWriter v;
java.util.HashMap v;
long v, v;
org.apache.parquet.hadoop.ParquetFileWriter$AlignmentStrategy v;
java.util.ArrayList v, v;
org.apache.parquet.hadoop.ParquetFileWriter$STATE v, v;
java.lang.Long v;
org.apache.parquet.hadoop.metadata.BlockMetaData v;
org.apache.parquet.io.PositionOutputStream v, v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: long;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state>;
v = virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter$STATE: org.apache.parquet.hadoop.ParquetFileWriter$STATE startBlock()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state> = v;
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object)>("{}: start block", v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$AlignmentStrategy alignment>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
interfaceinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter$AlignmentStrategy: void alignForRowGroup(org.apache.parquet.io.PositionOutputStream)>(v);
v = new org.apache.parquet.hadoop.metadata.BlockMetaData;
specialinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.BlockMetaData currentBlock> = v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentRecordCount> = v;
v = new java.util.ArrayList;
specialinvoke v.<java.util.ArrayList: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List currentColumnIndexes> = v;
v = new java.util.ArrayList;
specialinvoke v.<java.util.ArrayList: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List currentOffsetIndexes> = v;
v = new java.util.HashMap;
specialinvoke v.<java.util.HashMap: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Map currentBloomFilters> = v;
return;
}
public void startColumn(org.apache.parquet.column.ColumnDescriptor, long, org.apache.parquet.hadoop.metadata.CompressionCodecName) throws java.io.IOException
{
org.apache.parquet.schema.PrimitiveType v, v;
org.apache.parquet.hadoop.metadata.ColumnPath v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder v;
java.lang.String[] v;
long v;
org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder v;
org.apache.parquet.hadoop.ParquetFileWriter$STATE v, v;
java.util.HashSet v;
org.apache.parquet.column.ColumnDescriptor v;
int v;
org.apache.parquet.hadoop.metadata.CompressionCodecName v;
org.apache.parquet.column.EncodingStats$Builder v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: org.apache.parquet.column.ColumnDescriptor;
v := @parameter: long;
v := @parameter: org.apache.parquet.hadoop.metadata.CompressionCodecName;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state>;
v = virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter$STATE: org.apache.parquet.hadoop.ParquetFileWriter$STATE startColumn()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state> = v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.EncodingStats$Builder encodingStatsBuilder>;
virtualinvoke v.<org.apache.parquet.column.EncodingStats$Builder: org.apache.parquet.column.EncodingStats$Builder clear()>();
v = new java.util.HashSet;
specialinvoke v.<java.util.HashSet: void <init>()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Set currentEncodings> = v;
v = virtualinvoke v.<org.apache.parquet.column.ColumnDescriptor: java.lang.String[] getPath()>();
v = staticinvoke <org.apache.parquet.hadoop.metadata.ColumnPath: org.apache.parquet.hadoop.metadata.ColumnPath get(java.lang.String[])>(v);
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.ColumnPath currentChunkPath> = v;
v = virtualinvoke v.<org.apache.parquet.column.ColumnDescriptor: org.apache.parquet.schema.PrimitiveType getPrimitiveType()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.schema.PrimitiveType currentChunkType> = v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.CompressionCodecName currentChunkCodec> = v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkValueCount> = v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkFirstDataPage> = -1L;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long compressedLength> = 0L;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long uncompressedLength> = 0L;
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.statistics.Statistics currentStatistics> = null;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.schema.PrimitiveType currentChunkType>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: int columnIndexTruncateLength>;
v = staticinvoke <org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder getBuilder(org.apache.parquet.schema.PrimitiveType,int)>(v, v);
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder columnIndexBuilder> = v;
v = staticinvoke <org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder: org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder getBuilder()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder offsetIndexBuilder> = v;
return;
}
public void writeDictionaryPage(org.apache.parquet.column.page.DictionaryPage) throws java.io.IOException
{
org.apache.parquet.column.page.DictionaryPage v;
org.apache.parquet.hadoop.ParquetFileWriter v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: org.apache.parquet.column.page.DictionaryPage;
virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void writeDictionaryPage(org.apache.parquet.column.page.DictionaryPage,org.apache.parquet.format.BlockCipher$Encryptor,byte[])>(v, null, null);
return;
}
public void writeDictionaryPage(org.apache.parquet.column.page.DictionaryPage, org.apache.parquet.format.BlockCipher$Encryptor, byte[]) throws java.io.IOException
{
byte[] v, v;
org.apache.parquet.column.Encoding v, v, v, v;
org.apache.parquet.format.converter.ParquetMetadataConverter v, v;
java.lang.Integer v, v;
java.lang.Long v, v;
org.apache.parquet.bytes.BytesInput v, v, v;
org.apache.parquet.format.BlockCipher$Encryptor v;
boolean v;
java.util.Set v;
org.apache.parquet.hadoop.ParquetFileWriter$STATE v, v;
long v, v, v, v, v, v, v, v, v, v, v, v, v, v;
org.apache.parquet.column.page.DictionaryPage v;
int v, v, v, v, v;
org.apache.parquet.column.EncodingStats$Builder v;
org.slf4j.Logger v, v;
org.apache.parquet.hadoop.ParquetFileWriter v;
java.util.zip.CRC32 v, v, v;
org.apache.parquet.io.PositionOutputStream v, v, v, v, v, v, v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: org.apache.parquet.column.page.DictionaryPage;
v := @parameter: org.apache.parquet.format.BlockCipher$Encryptor;
v := @parameter: byte[];
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state>;
v = virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter$STATE: org.apache.parquet.hadoop.ParquetFileWriter$STATE write()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state> = v;
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
v = virtualinvoke v.<org.apache.parquet.column.page.DictionaryPage: int getDictionarySize()>();
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object,java.lang.Object)>("{}: write dictionary page: {} values", v, v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkDictionaryPageOffset> = v;
v = virtualinvoke v.<org.apache.parquet.column.page.DictionaryPage: int getUncompressedSize()>();
v = virtualinvoke v.<org.apache.parquet.column.page.DictionaryPage: org.apache.parquet.bytes.BytesInput getBytes()>();
v = virtualinvoke v.<org.apache.parquet.bytes.BytesInput: long size()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: boolean pageWriteChecksumEnabled>;
if v == 0 goto label;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.zip.CRC32 crc>;
virtualinvoke v.<java.util.zip.CRC32: void reset()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.zip.CRC32 crc>;
v = virtualinvoke v.<org.apache.parquet.column.page.DictionaryPage: org.apache.parquet.bytes.BytesInput getBytes()>();
v = virtualinvoke v.<org.apache.parquet.bytes.BytesInput: byte[] toByteArray()>();
virtualinvoke v.<java.util.zip.CRC32: void update(byte[])>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.format.converter.ParquetMetadataConverter metadataConverter>;
v = virtualinvoke v.<org.apache.parquet.column.page.DictionaryPage: int getDictionarySize()>();
v = virtualinvoke v.<org.apache.parquet.column.page.DictionaryPage: org.apache.parquet.column.Encoding getEncoding()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.zip.CRC32 crc>;
v = virtualinvoke v.<java.util.zip.CRC32: long getValue()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
virtualinvoke v.<org.apache.parquet.format.converter.ParquetMetadataConverter: void writeDictionaryPageHeader(int,int,int,org.apache.parquet.column.Encoding,int,java.io.OutputStream,org.apache.parquet.format.BlockCipher$Encryptor,byte[])>(v, v, v, v, v, v, v, v);
goto label;
label:
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.format.converter.ParquetMetadataConverter metadataConverter>;
v = virtualinvoke v.<org.apache.parquet.column.page.DictionaryPage: int getDictionarySize()>();
v = virtualinvoke v.<org.apache.parquet.column.page.DictionaryPage: org.apache.parquet.column.Encoding getEncoding()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
virtualinvoke v.<org.apache.parquet.format.converter.ParquetMetadataConverter: void writeDictionaryPageHeader(int,int,int,org.apache.parquet.column.Encoding,java.io.OutputStream,org.apache.parquet.format.BlockCipher$Encryptor,byte[])>(v, v, v, v, v, v, v);
label:
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkDictionaryPageOffset>;
v = v - v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long uncompressedLength>;
v = v + v;
v = v + v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long uncompressedLength> = v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long compressedLength>;
v = v + v;
v = v + v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long compressedLength> = v;
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object,java.lang.Object)>("{}: write dictionary page content {}", v, v);
v = virtualinvoke v.<org.apache.parquet.column.page.DictionaryPage: org.apache.parquet.bytes.BytesInput getBytes()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
virtualinvoke v.<org.apache.parquet.bytes.BytesInput: void writeAllTo(java.io.OutputStream)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.EncodingStats$Builder encodingStatsBuilder>;
v = virtualinvoke v.<org.apache.parquet.column.page.DictionaryPage: org.apache.parquet.column.Encoding getEncoding()>();
virtualinvoke v.<org.apache.parquet.column.EncodingStats$Builder: org.apache.parquet.column.EncodingStats$Builder addDictEncoding(org.apache.parquet.column.Encoding)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Set currentEncodings>;
v = virtualinvoke v.<org.apache.parquet.column.page.DictionaryPage: org.apache.parquet.column.Encoding getEncoding()>();
interfaceinvoke v.<java.util.Set: boolean add(java.lang.Object)>(v);
return;
}
public void writeDataPage(int, int, org.apache.parquet.bytes.BytesInput, org.apache.parquet.column.Encoding, org.apache.parquet.column.Encoding, org.apache.parquet.column.Encoding) throws java.io.IOException
{
org.apache.parquet.column.Encoding v, v, v;
org.apache.parquet.format.converter.ParquetMetadataConverter v;
java.lang.Integer v, v;
byte v;
java.lang.Long v, v;
org.apache.parquet.bytes.BytesInput v;
org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder v;
java.util.Set v, v, v;
org.apache.parquet.hadoop.ParquetFileWriter$STATE v, v;
long v, v, v, v, v, v, v, v, v, v, v, v;
org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder v;
int v, v, v;
org.apache.parquet.column.EncodingStats$Builder v;
org.slf4j.Logger v, v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.io.PositionOutputStream v, v, v, v, v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: int;
v := @parameter: int;
v := @parameter: org.apache.parquet.bytes.BytesInput;
v := @parameter: org.apache.parquet.column.Encoding;
v := @parameter: org.apache.parquet.column.Encoding;
v := @parameter: org.apache.parquet.column.Encoding;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state>;
v = virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter$STATE: org.apache.parquet.hadoop.ParquetFileWriter$STATE write()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state> = v;
v = staticinvoke <org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder: org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder getNoOpBuilder()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder offsetIndexBuilder> = v;
v = staticinvoke <org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder getNoOpBuilder()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder columnIndexBuilder> = v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object,java.lang.Object)>("{}: write data page: {} values", v, v);
v = virtualinvoke v.<org.apache.parquet.bytes.BytesInput: long size()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.format.converter.ParquetMetadataConverter metadataConverter>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
virtualinvoke v.<org.apache.parquet.format.converter.ParquetMetadataConverter: void writeDataPageV1Header(int,int,int,org.apache.parquet.column.Encoding,org.apache.parquet.column.Encoding,org.apache.parquet.column.Encoding,java.io.OutputStream)>(v, v, v, v, v, v, v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v - v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long uncompressedLength>;
v = v + v;
v = v + v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long uncompressedLength> = v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long compressedLength>;
v = v + v;
v = v + v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long compressedLength> = v;
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object,java.lang.Object)>("{}: write data page content {}", v, v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
virtualinvoke v.<org.apache.parquet.bytes.BytesInput: void writeAllTo(java.io.OutputStream)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.EncodingStats$Builder encodingStatsBuilder>;
virtualinvoke v.<org.apache.parquet.column.EncodingStats$Builder: org.apache.parquet.column.EncodingStats$Builder addDataEncoding(org.apache.parquet.column.Encoding)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Set currentEncodings>;
interfaceinvoke v.<java.util.Set: boolean add(java.lang.Object)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Set currentEncodings>;
interfaceinvoke v.<java.util.Set: boolean add(java.lang.Object)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Set currentEncodings>;
interfaceinvoke v.<java.util.Set: boolean add(java.lang.Object)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkFirstDataPage>;
v = v cmp 0L;
if v >= 0 goto label;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkFirstDataPage> = v;
label:
return;
}
public void writeDataPage(int, int, org.apache.parquet.bytes.BytesInput, org.apache.parquet.column.statistics.Statistics, org.apache.parquet.column.Encoding, org.apache.parquet.column.Encoding, org.apache.parquet.column.Encoding) throws java.io.IOException
{
org.apache.parquet.column.Encoding v, v, v;
org.apache.parquet.column.statistics.Statistics v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder v;
org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder v;
org.apache.parquet.bytes.BytesInput v;
int v, v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: int;
v := @parameter: int;
v := @parameter: org.apache.parquet.bytes.BytesInput;
v := @parameter: org.apache.parquet.column.statistics.Statistics;
v := @parameter: org.apache.parquet.column.Encoding;
v := @parameter: org.apache.parquet.column.Encoding;
v := @parameter: org.apache.parquet.column.Encoding;
v = staticinvoke <org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder: org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder getNoOpBuilder()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder offsetIndexBuilder> = v;
v = staticinvoke <org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder getNoOpBuilder()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder columnIndexBuilder> = v;
specialinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void innerWriteDataPage(int,int,org.apache.parquet.bytes.BytesInput,org.apache.parquet.column.statistics.Statistics,org.apache.parquet.column.Encoding,org.apache.parquet.column.Encoding,org.apache.parquet.column.Encoding)>(v, v, v, v, v, v, v);
return;
}
public void writeDataPage(int, int, org.apache.parquet.bytes.BytesInput, org.apache.parquet.column.statistics.Statistics, long, org.apache.parquet.column.Encoding, org.apache.parquet.column.Encoding, org.apache.parquet.column.Encoding) throws java.io.IOException
{
org.apache.parquet.column.Encoding v, v, v;
org.apache.parquet.column.statistics.Statistics v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder v;
long v, v, v, v;
org.apache.parquet.bytes.BytesInput v;
int v, v;
org.apache.parquet.io.PositionOutputStream v, v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: int;
v := @parameter: int;
v := @parameter: org.apache.parquet.bytes.BytesInput;
v := @parameter: org.apache.parquet.column.statistics.Statistics;
v := @parameter: long;
v := @parameter: org.apache.parquet.column.Encoding;
v := @parameter: org.apache.parquet.column.Encoding;
v := @parameter: org.apache.parquet.column.Encoding;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
specialinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void innerWriteDataPage(int,int,org.apache.parquet.bytes.BytesInput,org.apache.parquet.column.statistics.Statistics,org.apache.parquet.column.Encoding,org.apache.parquet.column.Encoding,org.apache.parquet.column.Encoding)>(v, v, v, v, v, v, v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder offsetIndexBuilder>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v - v;
virtualinvoke v.<org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder: void add(int,long)>(v, v);
return;
}
private void innerWriteDataPage(int, int, org.apache.parquet.bytes.BytesInput, org.apache.parquet.column.statistics.Statistics, org.apache.parquet.column.Encoding, org.apache.parquet.column.Encoding, org.apache.parquet.column.Encoding) throws java.io.IOException
{
byte[] v;
org.apache.parquet.column.Encoding v, v, v;
org.apache.parquet.format.converter.ParquetMetadataConverter v, v;
java.lang.Integer v, v;
byte v;
java.lang.Long v, v;
org.apache.parquet.bytes.BytesInput v;
boolean v;
java.util.Set v, v, v;
org.apache.parquet.hadoop.ParquetFileWriter$STATE v, v;
long v, v, v, v, v, v, v, v, v, v, v, v, v;
org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder v;
int v, v, v;
org.apache.parquet.column.EncodingStats$Builder v;
org.apache.parquet.column.statistics.Statistics v, v, v, v;
org.slf4j.Logger v, v;
org.apache.parquet.hadoop.ParquetFileWriter v;
java.util.zip.CRC32 v, v, v;
org.apache.parquet.io.PositionOutputStream v, v, v, v, v, v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: int;
v := @parameter: int;
v := @parameter: org.apache.parquet.bytes.BytesInput;
v := @parameter: org.apache.parquet.column.statistics.Statistics;
v := @parameter: org.apache.parquet.column.Encoding;
v := @parameter: org.apache.parquet.column.Encoding;
v := @parameter: org.apache.parquet.column.Encoding;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state>;
v = virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter$STATE: org.apache.parquet.hadoop.ParquetFileWriter$STATE write()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state> = v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkFirstDataPage>;
v = v cmp 0L;
if v >= 0 goto label;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkFirstDataPage> = v;
label:
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object,java.lang.Object)>("{}: write data page: {} values", v, v);
v = virtualinvoke v.<org.apache.parquet.bytes.BytesInput: long size()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: boolean pageWriteChecksumEnabled>;
if v == 0 goto label;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.zip.CRC32 crc>;
virtualinvoke v.<java.util.zip.CRC32: void reset()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.zip.CRC32 crc>;
v = virtualinvoke v.<org.apache.parquet.bytes.BytesInput: byte[] toByteArray()>();
virtualinvoke v.<java.util.zip.CRC32: void update(byte[])>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.format.converter.ParquetMetadataConverter metadataConverter>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.zip.CRC32 crc>;
v = virtualinvoke v.<java.util.zip.CRC32: long getValue()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
virtualinvoke v.<org.apache.parquet.format.converter.ParquetMetadataConverter: void writeDataPageV1Header(int,int,int,org.apache.parquet.column.Encoding,org.apache.parquet.column.Encoding,org.apache.parquet.column.Encoding,int,java.io.OutputStream)>(v, v, v, v, v, v, v, v);
goto label;
label:
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.format.converter.ParquetMetadataConverter metadataConverter>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
virtualinvoke v.<org.apache.parquet.format.converter.ParquetMetadataConverter: void writeDataPageV1Header(int,int,int,org.apache.parquet.column.Encoding,org.apache.parquet.column.Encoding,org.apache.parquet.column.Encoding,java.io.OutputStream)>(v, v, v, v, v, v, v);
label:
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v - v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long uncompressedLength>;
v = v + v;
v = v + v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long uncompressedLength> = v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long compressedLength>;
v = v + v;
v = v + v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long compressedLength> = v;
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object,java.lang.Object)>("{}: write data page content {}", v, v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
virtualinvoke v.<org.apache.parquet.bytes.BytesInput: void writeAllTo(java.io.OutputStream)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.statistics.Statistics currentStatistics>;
if v != null goto label;
v = virtualinvoke v.<org.apache.parquet.column.statistics.Statistics: org.apache.parquet.column.statistics.Statistics copy()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.statistics.Statistics currentStatistics> = v;
goto label;
label:
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.statistics.Statistics currentStatistics>;
virtualinvoke v.<org.apache.parquet.column.statistics.Statistics: void mergeStatistics(org.apache.parquet.column.statistics.Statistics)>(v);
label:
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder columnIndexBuilder>;
virtualinvoke v.<org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder: void add(org.apache.parquet.column.statistics.Statistics)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.EncodingStats$Builder encodingStatsBuilder>;
virtualinvoke v.<org.apache.parquet.column.EncodingStats$Builder: org.apache.parquet.column.EncodingStats$Builder addDataEncoding(org.apache.parquet.column.Encoding)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Set currentEncodings>;
interfaceinvoke v.<java.util.Set: boolean add(java.lang.Object)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Set currentEncodings>;
interfaceinvoke v.<java.util.Set: boolean add(java.lang.Object)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Set currentEncodings>;
interfaceinvoke v.<java.util.Set: boolean add(java.lang.Object)>(v);
return;
}
void addBloomFilter(java.lang.String, org.apache.parquet.column.values.bloomfilter.BloomFilter)
{
org.apache.parquet.column.values.bloomfilter.BloomFilter v;
org.apache.parquet.hadoop.ParquetFileWriter v;
java.util.Map v;
java.lang.String v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: java.lang.String;
v := @parameter: org.apache.parquet.column.values.bloomfilter.BloomFilter;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Map currentBloomFilters>;
interfaceinvoke v.<java.util.Map: java.lang.Object put(java.lang.Object,java.lang.Object)>(v, v);
return;
}
public void writeDataPageV2(int, int, int, org.apache.parquet.bytes.BytesInput, org.apache.parquet.bytes.BytesInput, org.apache.parquet.column.Encoding, org.apache.parquet.bytes.BytesInput, int, org.apache.parquet.column.statistics.Statistics) throws java.io.IOException
{
org.apache.parquet.column.Encoding v;
org.apache.parquet.format.converter.ParquetMetadataConverter v;
byte v;
org.apache.parquet.bytes.BytesInput v, v, v, v;
org.apache.parquet.bytes.BytesInput[] v;
org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder v;
java.util.Set v;
org.apache.parquet.hadoop.ParquetFileWriter$STATE v, v;
long v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v;
org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder v;
int v, v, v, v, v, v, v, v;
org.apache.parquet.column.EncodingStats$Builder v;
org.apache.parquet.column.statistics.Statistics v, v, v, v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.io.PositionOutputStream v, v, v, v, v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: int;
v := @parameter: int;
v := @parameter: int;
v := @parameter: org.apache.parquet.bytes.BytesInput;
v := @parameter: org.apache.parquet.bytes.BytesInput;
v := @parameter: org.apache.parquet.column.Encoding;
v := @parameter: org.apache.parquet.bytes.BytesInput;
v := @parameter: int;
v := @parameter: org.apache.parquet.column.statistics.Statistics;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state>;
v = virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter$STATE: org.apache.parquet.hadoop.ParquetFileWriter$STATE write()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state> = v;
v = virtualinvoke v.<org.apache.parquet.bytes.BytesInput: long size()>();
v = specialinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: int toIntWithCheck(long)>(v);
v = virtualinvoke v.<org.apache.parquet.bytes.BytesInput: long size()>();
v = specialinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: int toIntWithCheck(long)>(v);
v = virtualinvoke v.<org.apache.parquet.bytes.BytesInput: long size()>();
v = virtualinvoke v.<org.apache.parquet.bytes.BytesInput: long size()>();
v = v + v;
v = virtualinvoke v.<org.apache.parquet.bytes.BytesInput: long size()>();
v = v + v;
v = specialinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: int toIntWithCheck(long)>(v);
v = virtualinvoke v.<org.apache.parquet.bytes.BytesInput: long size()>();
v = v + v;
v = virtualinvoke v.<org.apache.parquet.bytes.BytesInput: long size()>();
v = v + v;
v = specialinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: int toIntWithCheck(long)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkFirstDataPage>;
v = v cmp 0L;
if v >= 0 goto label;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkFirstDataPage> = v;
label:
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.format.converter.ParquetMetadataConverter metadataConverter>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
virtualinvoke v.<org.apache.parquet.format.converter.ParquetMetadataConverter: void writeDataPageV2Header(int,int,int,int,int,org.apache.parquet.column.Encoding,int,int,java.io.OutputStream)>(v, v, v, v, v, v, v, v, v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v - v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long uncompressedLength>;
v = v + v;
v = v + v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long uncompressedLength> = v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long compressedLength>;
v = v + v;
v = v + v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long compressedLength> = v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.statistics.Statistics currentStatistics>;
if v != null goto label;
v = virtualinvoke v.<org.apache.parquet.column.statistics.Statistics: org.apache.parquet.column.statistics.Statistics copy()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.statistics.Statistics currentStatistics> = v;
goto label;
label:
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.statistics.Statistics currentStatistics>;
virtualinvoke v.<org.apache.parquet.column.statistics.Statistics: void mergeStatistics(org.apache.parquet.column.statistics.Statistics)>(v);
label:
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder columnIndexBuilder>;
virtualinvoke v.<org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder: void add(org.apache.parquet.column.statistics.Statistics)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Set currentEncodings>;
interfaceinvoke v.<java.util.Set: boolean add(java.lang.Object)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.EncodingStats$Builder encodingStatsBuilder>;
virtualinvoke v.<org.apache.parquet.column.EncodingStats$Builder: org.apache.parquet.column.EncodingStats$Builder addDataEncoding(org.apache.parquet.column.Encoding)>(v);
v = newarray (org.apache.parquet.bytes.BytesInput)[3];
v[0] = v;
v[1] = v;
v[2] = v;
v = staticinvoke <org.apache.parquet.bytes.BytesInput: org.apache.parquet.bytes.BytesInput concat(org.apache.parquet.bytes.BytesInput[])>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
virtualinvoke v.<org.apache.parquet.bytes.BytesInput: void writeAllTo(java.io.OutputStream)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder offsetIndexBuilder>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v - v;
virtualinvoke v.<org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder: void add(int,long)>(v, v);
return;
}
void writeColumnChunk(org.apache.parquet.column.ColumnDescriptor, long, org.apache.parquet.hadoop.metadata.CompressionCodecName, org.apache.parquet.column.page.DictionaryPage, org.apache.parquet.bytes.BytesInput, long, long, org.apache.parquet.column.statistics.Statistics, org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder, org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder, org.apache.parquet.column.values.bloomfilter.BloomFilter, java.util.Set, java.util.Set, java.util.List) throws java.io.IOException
{
org.apache.parquet.column.statistics.Statistics v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder v;
java.util.Set v, v;
long v, v, v;
org.apache.parquet.column.page.DictionaryPage v;
org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder v;
java.util.List v;
org.apache.parquet.column.values.bloomfilter.BloomFilter v;
org.apache.parquet.column.ColumnDescriptor v;
org.apache.parquet.bytes.BytesInput v;
org.apache.parquet.hadoop.metadata.CompressionCodecName v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: org.apache.parquet.column.ColumnDescriptor;
v := @parameter: long;
v := @parameter: org.apache.parquet.hadoop.metadata.CompressionCodecName;
v := @parameter: org.apache.parquet.column.page.DictionaryPage;
v := @parameter: org.apache.parquet.bytes.BytesInput;
v := @parameter: long;
v := @parameter: long;
v := @parameter: org.apache.parquet.column.statistics.Statistics;
v := @parameter: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder;
v := @parameter: org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder;
v := @parameter: org.apache.parquet.column.values.bloomfilter.BloomFilter;
v := @parameter: java.util.Set;
v := @parameter: java.util.Set;
v := @parameter: java.util.List;
virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void writeColumnChunk(org.apache.parquet.column.ColumnDescriptor,long,org.apache.parquet.hadoop.metadata.CompressionCodecName,org.apache.parquet.column.page.DictionaryPage,org.apache.parquet.bytes.BytesInput,long,long,org.apache.parquet.column.statistics.Statistics,org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder,org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder,org.apache.parquet.column.values.bloomfilter.BloomFilter,java.util.Set,java.util.Set,java.util.List,org.apache.parquet.format.BlockCipher$Encryptor,int,int,byte[])>(v, v, v, v, v, v, v, v, v, v, v, v, v, v, null, 0, 0, null);
return;
}
// Writes one complete column chunk: an optional (possibly encrypted) dictionary
// page followed by the pre-serialized data pages, then finalizes the chunk via
// endColumn(). NOTE(review): this is Soot/Jimple IR — every local is named 'v';
// each 'v' below is a distinct slot and meaning follows statement order only.
void writeColumnChunk(org.apache.parquet.column.ColumnDescriptor, long, org.apache.parquet.hadoop.metadata.CompressionCodecName, org.apache.parquet.column.page.DictionaryPage, org.apache.parquet.bytes.BytesInput, long, long, org.apache.parquet.column.statistics.Statistics, org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder, org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder, org.apache.parquet.column.values.bloomfilter.BloomFilter, java.util.Set, java.util.Set, java.util.List, org.apache.parquet.format.BlockCipher$Encryptor, int, int, byte[]) throws java.io.IOException
{
// Local slot declarations (Jimple requires all locals up front).
byte[] v, v;
org.apache.parquet.column.Encoding v;
java.lang.Long v, v;
java.util.Map v;
org.apache.parquet.bytes.BytesInput v;
org.apache.parquet.format.BlockCipher$Encryptor v;
boolean v, v, v;
org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder v;
java.util.Set v, v, v, v, v;
org.apache.parquet.hadoop.ParquetFileWriter$STATE v, v;
java.util.List v;
org.apache.parquet.crypto.ModuleCipherFactory$ModuleType v;
org.apache.parquet.column.ColumnDescriptor v;
org.apache.parquet.hadoop.metadata.CompressionCodecName v;
java.lang.String[] v;
long v, v, v, v, v, v, v, v, v, v, v, v, v, v;
org.apache.parquet.column.page.DictionaryPage v;
org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder v;
org.apache.parquet.column.values.bloomfilter.BloomFilter v;
int v, v, v;
java.lang.String v;
org.apache.parquet.column.EncodingStats$Builder v, v;
org.apache.parquet.column.statistics.Statistics v;
org.slf4j.Logger v, v;
java.util.Iterator v;
org.apache.parquet.hadoop.ParquetFileWriter v;
java.lang.Object v;
org.apache.parquet.io.PositionOutputStream v, v, v, v;
// Bind 'this' and the 18 parameters (descriptor, valueCount, codec,
// dictionaryPage, data bytes, two sizes, statistics, column/offset index
// builders, bloom filter, RL/DL/data encodings, page encryptor, two ints
// presumably row-group/column ordinals, and a file AAD — TODO confirm
// the int/byte[] parameter meanings against parquet-mr source.
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: org.apache.parquet.column.ColumnDescriptor;
v := @parameter: long;
v := @parameter: org.apache.parquet.hadoop.metadata.CompressionCodecName;
v := @parameter: org.apache.parquet.column.page.DictionaryPage;
v := @parameter: org.apache.parquet.bytes.BytesInput;
v := @parameter: long;
v := @parameter: long;
v := @parameter: org.apache.parquet.column.statistics.Statistics;
v := @parameter: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder;
v := @parameter: org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder;
v := @parameter: org.apache.parquet.column.values.bloomfilter.BloomFilter;
v := @parameter: java.util.Set;
v := @parameter: java.util.Set;
v := @parameter: java.util.List;
v := @parameter: org.apache.parquet.format.BlockCipher$Encryptor;
v := @parameter: int;
v := @parameter: int;
v := @parameter: byte[];
// Transition the state machine and record chunk identity (path/codec/count).
virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void startColumn(org.apache.parquet.column.ColumnDescriptor,long,org.apache.parquet.hadoop.metadata.CompressionCodecName)>(v, v, v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state>;
v = virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter$STATE: org.apache.parquet.hadoop.ParquetFileWriter$STATE write()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state> = v;
// If a dictionary page was supplied, optionally build a module AAD for
// its header (encrypted files only) and write it before the data pages.
if v == null goto label;
v = null;
if null == v goto label;
v = <org.apache.parquet.crypto.ModuleCipherFactory$ModuleType: org.apache.parquet.crypto.ModuleCipherFactory$ModuleType DictionaryPageHeader>;
v = (int) -1;
v = staticinvoke <org.apache.parquet.crypto.AesCipher: byte[] createModuleAAD(byte[],org.apache.parquet.crypto.ModuleCipherFactory$ModuleType,int,int,int)>(v, v, v, v, v);
label:
virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void writeDictionaryPage(org.apache.parquet.column.page.DictionaryPage,org.apache.parquet.format.BlockCipher$Encryptor,byte[])>(v, v, v);
label:
// If a bloom filter was supplied: scan the data encodings; NOTE(review)
// the flag set below appears related to spotting RLE_DICTIONARY, after
// which the filter is registered under the column's dot-joined path.
if v == null goto label;
v = 0;
v = interfaceinvoke v.<java.util.List: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = <org.apache.parquet.column.Encoding: org.apache.parquet.column.Encoding RLE_DICTIONARY>;
if v == v goto label;
v = 1;
label:
if v == 0 goto label;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Map currentBloomFilters>;
v = virtualinvoke v.<org.apache.parquet.column.ColumnDescriptor: java.lang.String[] getPath()>();
v = staticinvoke <java.lang.String: java.lang.String join(java.lang.CharSequence,java.lang.CharSequence[])>(".", v);
interfaceinvoke v.<java.util.Map: java.lang.Object put(java.lang.Object,java.lang.Object)>(v, v);
label:
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object)>("{}: write data pages", v);
// Accumulate uncompressed/compressed byte counts for this chunk. The
// size delta uses BytesInput.size() minus an earlier-captured length.
v = virtualinvoke v.<org.apache.parquet.bytes.BytesInput: long size()>();
v = v - v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long uncompressedLength>;
v = v + v;
v = v + v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long uncompressedLength> = v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long compressedLength>;
v = v + v;
v = v + v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long compressedLength> = v;
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object)>("{}: write data pages content", v);
// Remember where the first data page lands, then stream the page bytes out.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkFirstDataPage> = v;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
virtualinvoke v.<org.apache.parquet.bytes.BytesInput: void writeAllTo(java.io.OutputStream)>(v);
// Record data encodings; mark the stats builder as v2 when the set checked
// below is non-empty (presumably the data-page-v2 encodings — TODO confirm).
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.EncodingStats$Builder encodingStatsBuilder>;
virtualinvoke v.<org.apache.parquet.column.EncodingStats$Builder: org.apache.parquet.column.EncodingStats$Builder addDataEncodings(java.util.Collection)>(v);
v = interfaceinvoke v.<java.util.Set: boolean isEmpty()>();
if v == 0 goto label;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.EncodingStats$Builder encodingStatsBuilder>;
virtualinvoke v.<org.apache.parquet.column.EncodingStats$Builder: org.apache.parquet.column.EncodingStats$Builder withV2Pages()>();
label:
// Union all three encoding sets (RL, DL, data) into currentEncodings.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Set currentEncodings>;
interfaceinvoke v.<java.util.Set: boolean addAll(java.util.Collection)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Set currentEncodings>;
interfaceinvoke v.<java.util.Set: boolean addAll(java.util.Collection)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Set currentEncodings>;
interfaceinvoke v.<java.util.Set: boolean addAll(java.util.Collection)>(v);
// Hand the caller-built statistics and index builders to endColumn().
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.statistics.Statistics currentStatistics> = v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder columnIndexBuilder> = v;
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder offsetIndexBuilder> = v;
virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void endColumn()>();
return;
}
// Finalizes the current column chunk: builds (or drops) its column index,
// builds its offset index, appends ColumnChunkMetaData to the current block,
// rolls the chunk's uncompressed size into the block total, and resets the
// per-chunk accumulators. NOTE(review): Jimple IR — all locals are named 'v'.
public void endColumn() throws java.io.IOException
{
org.apache.parquet.schema.PrimitiveType v;
org.apache.parquet.internal.column.columnindex.ColumnIndex v;
org.apache.parquet.internal.column.columnindex.OffsetIndex v;
byte v;
java.lang.Long v;
org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder v;
java.util.Set v;
org.apache.parquet.hadoop.metadata.ColumnChunkMetaData v;
org.apache.parquet.hadoop.ParquetFileWriter$STATE v, v;
java.util.List v, v, v;
org.apache.parquet.column.EncodingStats v;
org.apache.parquet.hadoop.metadata.BlockMetaData v, v, v;
org.apache.parquet.hadoop.metadata.CompressionCodecName v;
org.apache.parquet.hadoop.metadata.ColumnPath v;
long v, v, v, v, v, v, v, v, v, v, v, v;
org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder v, v, v;
int v;
org.apache.parquet.column.EncodingStats$Builder v;
org.apache.parquet.column.statistics.Statistics v;
org.slf4j.Logger v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.io.PositionOutputStream v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
// State-machine transition; throws if endColumn is not legal here.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state>;
v = virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter$STATE: org.apache.parquet.hadoop.ParquetFileWriter$STATE endColumn()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state> = v;
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object)>("{}: end column", v);
// Size guard: if the index's min/max payload exceeds pageCount * 4096
// bytes, store null instead of the built index (the 4 KiB-per-page cap
// appears to be a truncation heuristic — TODO confirm against parquet-mr).
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder columnIndexBuilder>;
v = virtualinvoke v.<org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder: long getMinMaxSize()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder columnIndexBuilder>;
v = virtualinvoke v.<org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder: int getPageCount()>();
v = v * 4096L;
v = v cmp v;
if v <= 0 goto label;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List currentColumnIndexes>;
interfaceinvoke v.<java.util.List: boolean add(java.lang.Object)>(null);
goto label;
label:
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List currentColumnIndexes>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder columnIndexBuilder>;
v = virtualinvoke v.<org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder: org.apache.parquet.internal.column.columnindex.ColumnIndex build()>();
interfaceinvoke v.<java.util.List: boolean add(java.lang.Object)>(v);
label:
// Offset index is always built, anchored at the first data page offset.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List currentOffsetIndexes>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder offsetIndexBuilder>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkFirstDataPage>;
v = virtualinvoke v.<org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder: org.apache.parquet.internal.column.columnindex.OffsetIndex build(long)>(v);
interfaceinvoke v.<java.util.List: boolean add(java.lang.Object)>(v);
// Assemble ColumnChunkMetaData from the per-chunk fields and attach it.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.BlockMetaData currentBlock>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.ColumnPath currentChunkPath>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.schema.PrimitiveType currentChunkType>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.CompressionCodecName currentChunkCodec>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.EncodingStats$Builder encodingStatsBuilder>;
v = virtualinvoke v.<org.apache.parquet.column.EncodingStats$Builder: org.apache.parquet.column.EncodingStats build()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Set currentEncodings>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.column.statistics.Statistics currentStatistics>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkFirstDataPage>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkDictionaryPageOffset>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentChunkValueCount>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long compressedLength>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long uncompressedLength>;
v = staticinvoke <org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.hadoop.metadata.ColumnChunkMetaData get(org.apache.parquet.hadoop.metadata.ColumnPath,org.apache.parquet.schema.PrimitiveType,org.apache.parquet.hadoop.metadata.CompressionCodecName,org.apache.parquet.column.EncodingStats,java.util.Set,org.apache.parquet.column.statistics.Statistics,long,long,long,long,long)>(v, v, v, v, v, v, v, v, v, v, v);
virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: void addColumn(org.apache.parquet.hadoop.metadata.ColumnChunkMetaData)>(v);
// Block total byte size grows by this chunk's uncompressed length.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.BlockMetaData currentBlock>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.BlockMetaData currentBlock>;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: long getTotalByteSize()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long uncompressedLength>;
v = v + v;
virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: void setTotalByteSize(long)>(v);
// Reset per-chunk accumulators for the next column.
v.<org.apache.parquet.hadoop.ParquetFileWriter: long uncompressedLength> = 0L;
v.<org.apache.parquet.hadoop.ParquetFileWriter: long compressedLength> = 0L;
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.ColumnIndexBuilder columnIndexBuilder> = null;
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.internal.column.columnindex.OffsetIndexBuilder offsetIndexBuilder> = null;
return;
}
// Finalizes the current row group: sets its row count and ordinal, moves the
// block plus its per-block column indexes, offset indexes, and bloom filters
// into the file-level lists, and nulls the per-block state for the next group.
// NOTE(review): Jimple IR — every local is named 'v'; read by statement order.
public void endBlock() throws java.io.IOException
{
long v, v;
java.lang.Long v;
java.util.Map v;
int v;
org.slf4j.Logger v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.hadoop.ParquetFileWriter$STATE v, v;
java.util.List v, v, v, v, v, v, v;
org.apache.parquet.hadoop.metadata.BlockMetaData v, v, v;
org.apache.parquet.io.PositionOutputStream v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
// State-machine transition; throws if endBlock is not legal here.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state>;
v = virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter$STATE: org.apache.parquet.hadoop.ParquetFileWriter$STATE endBlock()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state> = v;
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object)>("{}: end block", v);
// Row count comes from records written; ordinal is the block's position
// in the file (current size of the blocks list).
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.BlockMetaData currentBlock>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: long currentRecordCount>;
virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: void setRowCount(long)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.BlockMetaData currentBlock>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List blocks>;
v = interfaceinvoke v.<java.util.List: int size()>();
virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: void setOrdinal(int)>(v);
// Promote per-block collections to the file-level lists (one entry per block).
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List blocks>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.BlockMetaData currentBlock>;
interfaceinvoke v.<java.util.List: boolean add(java.lang.Object)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List columnIndexes>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List currentColumnIndexes>;
interfaceinvoke v.<java.util.List: boolean add(java.lang.Object)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List offsetIndexes>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List currentOffsetIndexes>;
interfaceinvoke v.<java.util.List: boolean add(java.lang.Object)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List bloomFilters>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Map currentBloomFilters>;
interfaceinvoke v.<java.util.List: boolean add(java.lang.Object)>(v);
// Clear per-block state; startBlock() is expected to repopulate it.
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List currentColumnIndexes> = null;
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List currentOffsetIndexes> = null;
v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Map currentBloomFilters> = null;
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.BlockMetaData currentBlock> = null;
return;
}
// Hadoop-path convenience overload: opens the file with ParquetFileReader and
// delegates to reader.appendTo(this). NOTE(review): the reader is NOT closed
// here (contrast the InputFile overload below, which uses try/finally) —
// presumably matches the deprecated upstream overload; verify before relying
// on resource cleanup. 'oadd.' prefix indicates shaded Hadoop classes.
public void appendFile(oadd.org.apache.hadoop.conf.Configuration, oadd.org.apache.hadoop.fs.Path) throws java.io.IOException
{
org.apache.parquet.hadoop.ParquetFileReader v;
org.apache.parquet.hadoop.ParquetFileWriter v;
oadd.org.apache.hadoop.conf.Configuration v;
oadd.org.apache.hadoop.fs.Path v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: oadd.org.apache.hadoop.conf.Configuration;
v := @parameter: oadd.org.apache.hadoop.fs.Path;
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileReader: org.apache.parquet.hadoop.ParquetFileReader open(oadd.org.apache.hadoop.conf.Configuration,oadd.org.apache.hadoop.fs.Path)>(v, v);
virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileReader: void appendTo(org.apache.parquet.hadoop.ParquetFileWriter)>(v);
return;
}
// InputFile overload: opens a ParquetFileReader, appends its content to this
// writer, and always closes the reader. The label/catch structure below is
// the Jimple rendering of a try/finally: close() runs on both the normal path
// and the exceptional path (the caught Throwable is rethrown after cleanup).
public void appendFile(org.apache.parquet.io.InputFile) throws java.io.IOException
{
java.lang.Throwable v;
org.apache.parquet.hadoop.ParquetFileReader v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.io.InputFile v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: org.apache.parquet.io.InputFile;
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileReader: org.apache.parquet.hadoop.ParquetFileReader open(org.apache.parquet.io.InputFile)>(v);
label:
// try block: copy the reader's row groups into this writer.
virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileReader: void appendTo(org.apache.parquet.hadoop.ParquetFileWriter)>(v);
label:
// finally: close the reader on the normal path.
virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileReader: void close()>();
goto label;
label:
// finally (exceptional path): rethrow after the handler region.
v := @caughtexception;
throw v;
label:
return;
catch java.lang.Throwable from label to label with label;
}
// Hadoop-stream convenience overload: wraps the FSDataInputStream in a
// SeekableInputStream via HadoopStreams and delegates to the
// SeekableInputStream overload with the same row-group list and flag.
public void appendRowGroups(oadd.org.apache.hadoop.fs.FSDataInputStream, java.util.List, boolean) throws java.io.IOException
{
java.util.List v;
oadd.org.apache.hadoop.fs.FSDataInputStream v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.io.SeekableInputStream v;
boolean v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: oadd.org.apache.hadoop.fs.FSDataInputStream;
v := @parameter: java.util.List;
v := @parameter: boolean;
v = staticinvoke <org.apache.parquet.hadoop.util.HadoopStreams: org.apache.parquet.io.SeekableInputStream wrap(oadd.org.apache.hadoop.fs.FSDataInputStream)>(v);
virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void appendRowGroups(org.apache.parquet.io.SeekableInputStream,java.util.List,boolean)>(v, v, v);
return;
}
// Appends each BlockMetaData in the list by delegating to appendRowGroup,
// passing the same input stream and dropColumns flag for every element.
// The label/goto structure is the Jimple rendering of a for-each loop.
public void appendRowGroups(org.apache.parquet.io.SeekableInputStream, java.util.List, boolean) throws java.io.IOException
{
java.util.List v;
java.util.Iterator v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.io.SeekableInputStream v;
java.lang.Object v;
boolean v, v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: org.apache.parquet.io.SeekableInputStream;
v := @parameter: java.util.List;
v := @parameter: boolean;
v = interfaceinvoke v.<java.util.List: java.util.Iterator iterator()>();
label:
// loop head: continue while the iterator has more row groups.
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void appendRowGroup(org.apache.parquet.io.SeekableInputStream,org.apache.parquet.hadoop.metadata.BlockMetaData,boolean)>(v, v, v);
goto label;
label:
return;
}
// Hadoop-stream convenience overload: wraps the FSDataInputStream via
// HadoopStreams and delegates to the SeekableInputStream appendRowGroup.
public void appendRowGroup(oadd.org.apache.hadoop.fs.FSDataInputStream, org.apache.parquet.hadoop.metadata.BlockMetaData, boolean) throws java.io.IOException
{
oadd.org.apache.hadoop.fs.FSDataInputStream v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.hadoop.metadata.BlockMetaData v;
org.apache.parquet.io.SeekableInputStream v;
boolean v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: oadd.org.apache.hadoop.fs.FSDataInputStream;
v := @parameter: org.apache.parquet.hadoop.metadata.BlockMetaData;
v := @parameter: boolean;
v = staticinvoke <org.apache.parquet.hadoop.util.HadoopStreams: org.apache.parquet.io.SeekableInputStream wrap(oadd.org.apache.hadoop.fs.FSDataInputStream)>(v);
virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void appendRowGroup(org.apache.parquet.io.SeekableInputStream,org.apache.parquet.hadoop.metadata.BlockMetaData,boolean)>(v, v, v);
return;
}
// Copies a whole row group from another Parquet file into this writer as raw
// bytes: starts a block, matches the source block's column chunks against this
// writer's schema, bulk-copies contiguous chunk ranges, rebuilds each chunk's
// metadata (with offsets recomputed for the new file position), and ends the
// block. Column/offset indexes are NOT carried over (null is added for each).
// NOTE(review): Jimple IR — all locals are 'v'; semantics follow statement order.
public void appendRowGroup(org.apache.parquet.io.SeekableInputStream, org.apache.parquet.hadoop.metadata.BlockMetaData, boolean) throws java.io.IOException
{
byte v, v;
java.util.Set v, v;
org.apache.parquet.column.EncodingStats v;
org.apache.parquet.hadoop.metadata.BlockMetaData v, v, v;
org.apache.parquet.hadoop.metadata.CompressionCodecName v;
java.lang.Object[] v, v;
long v, v, v, v, v, v, v, v, v, v, v, v, v, v, v;
java.util.ArrayList v;
org.apache.parquet.io.SeekableInputStream v;
java.lang.String v, v, v, v, v;
org.apache.parquet.hadoop.Offsets v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.schema.PrimitiveType v;
boolean v, v, v, v;
org.apache.parquet.hadoop.metadata.ColumnChunkMetaData v;
org.apache.parquet.schema.MessageType v;
java.util.List v, v, v, v;
org.apache.parquet.hadoop.metadata.ColumnPath v, v, v;
java.lang.String[] v;
java.util.HashMap v;
int v, v, v, v, v;
org.apache.parquet.column.statistics.Statistics v;
java.util.Iterator v, v;
java.lang.IllegalArgumentException v, v;
java.lang.Object v, v, v, v, v;
org.apache.parquet.io.PositionOutputStream v, v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: org.apache.parquet.io.SeekableInputStream;
v := @parameter: org.apache.parquet.hadoop.metadata.BlockMetaData;
v := @parameter: boolean;
// Open a new block sized to the source block's row count.
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: long getRowCount()>();
virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void startBlock(long)>(v);
// Index the source chunks by dot-string column path.
v = new java.util.HashMap;
specialinvoke v.<java.util.HashMap: void <init>()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: java.util.List getColumns()>();
v = interfaceinvoke v.<java.util.List: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.hadoop.metadata.ColumnPath getPath()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnPath: java.lang.String toDotString()>();
interfaceinvoke v.<java.util.Map: java.lang.Object put(java.lang.Object,java.lang.Object)>(v, v);
goto label;
label:
// Walk this writer's schema columns in order, pulling (removing) the
// matching source chunk for each; a schema column with no source chunk
// is an error.
v = new java.util.ArrayList;
specialinvoke v.<java.util.ArrayList: void <init>()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.schema.MessageType schema>;
v = virtualinvoke v.<org.apache.parquet.schema.MessageType: java.util.List getColumns()>();
v = interfaceinvoke v.<java.util.List: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = virtualinvoke v.<org.apache.parquet.column.ColumnDescriptor: java.lang.String[] getPath()>();
v = staticinvoke <org.apache.parquet.hadoop.metadata.ColumnPath: org.apache.parquet.hadoop.metadata.ColumnPath get(java.lang.String[])>(v);
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnPath: java.lang.String toDotString()>();
v = interfaceinvoke v.<java.util.Map: java.lang.Object remove(java.lang.Object)>(v);
if v == null goto label;
interfaceinvoke v.<java.util.List: boolean add(java.lang.Object)>(v);
goto label;
label:
// Missing column in the source row group -> IllegalArgumentException.
v = new java.lang.IllegalArgumentException;
v = newarray (java.lang.Object)[2];
v[0] = v;
v[1] = v;
v = staticinvoke <java.lang.String: java.lang.String format(java.lang.String,java.lang.Object[])>("Missing column \'%s\', cannot copy row group: %s", v);
specialinvoke v.<java.lang.IllegalArgumentException: void <init>(java.lang.String)>(v);
throw v;
label:
// Unless the dropColumns flag is set, leftover source chunks (columns
// absent from this writer's schema) are also an error.
if v != 0 goto label;
v = interfaceinvoke v.<java.util.Map: boolean isEmpty()>();
if v != 0 goto label;
v = new java.lang.IllegalArgumentException;
v = newarray (java.lang.Object)[1];
v = interfaceinvoke v.<java.util.Map: java.util.Set keySet()>();
v = staticinvoke <java.lang.String: java.lang.String join(java.lang.CharSequence,java.lang.Iterable)>(", ", v);
v[0] = v;
v = staticinvoke <java.lang.String: java.lang.String format(java.lang.String,java.lang.Object[])>("Columns cannot be copied (missing from target schema): %s", v);
specialinvoke v.<java.lang.IllegalArgumentException: void <init>(java.lang.String)>(v);
throw v;
label:
// Copy loop: coalesce adjacent chunks into one contiguous byte range
// (start = -1 means "no range open"); flush the range with copy() when
// the next chunk is not contiguous or the list ends.
v = -1L;
v = 0L;
v = 0L;
v = 0;
label:
v = interfaceinvoke v.<java.util.List: int size()>();
if v >= v goto label;
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v + v;
v = v cmp 0L;
if v >= 0 goto label;
// Opening a new range: remember the source starting position.
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: long getStartingPos()>();
label:
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: long getTotalSize()>();
v = v + v;
v = v + 1;
v = interfaceinvoke v.<java.util.List: int size()>();
if v == v goto label;
// Peek the next chunk; if it starts exactly at range end, keep extending.
v = v + 1;
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(v);
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: long getStartingPos()>();
v = v + v;
v = v cmp v;
if v == 0 goto label;
label:
// Flush the accumulated contiguous range into this file.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: void copy(org.apache.parquet.io.SeekableInputStream,org.apache.parquet.io.PositionOutputStream,long,long)>(v, v, v, v);
v = -1L;
v = 0L;
label:
// Raw copy preserves no page indexes: record null placeholders so the
// per-block index lists stay aligned with the column chunks.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List currentColumnIndexes>;
interfaceinvoke v.<java.util.List: boolean add(java.lang.Object)>(null);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List currentOffsetIndexes>;
interfaceinvoke v.<java.util.List: boolean add(java.lang.Object)>(null);
// Recompute first-data-page / dictionary-page offsets for the new file
// position, then register the chunk's metadata on the current block.
v = staticinvoke <org.apache.parquet.hadoop.Offsets: org.apache.parquet.hadoop.Offsets getOffsets(org.apache.parquet.io.SeekableInputStream,org.apache.parquet.hadoop.metadata.ColumnChunkMetaData,long)>(v, v, v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.BlockMetaData currentBlock>;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.hadoop.metadata.ColumnPath getPath()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.schema.PrimitiveType getPrimitiveType()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.hadoop.metadata.CompressionCodecName getCodec()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.column.EncodingStats getEncodingStats()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: java.util.Set getEncodings()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.column.statistics.Statistics getStatistics()>();
v = v.<org.apache.parquet.hadoop.Offsets: long firstDataPageOffset>;
v = v.<org.apache.parquet.hadoop.Offsets: long dictionaryPageOffset>;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: long getValueCount()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: long getTotalSize()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: long getTotalUncompressedSize()>();
v = staticinvoke <org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.hadoop.metadata.ColumnChunkMetaData get(org.apache.parquet.hadoop.metadata.ColumnPath,org.apache.parquet.schema.PrimitiveType,org.apache.parquet.hadoop.metadata.CompressionCodecName,org.apache.parquet.column.EncodingStats,java.util.Set,org.apache.parquet.column.statistics.Statistics,long,long,long,long,long)>(v, v, v, v, v, v, v, v, v, v, v);
virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: void addColumn(org.apache.parquet.hadoop.metadata.ColumnChunkMetaData)>(v);
// Accumulate the block's uncompressed total and advance the loop index.
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: long getTotalUncompressedSize()>();
v = v + v;
v = v + 1;
goto label;
label:
// Record the block's total byte size and close the row group.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.BlockMetaData currentBlock>;
virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: void setTotalByteSize(long)>(v);
virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter: void endBlock()>();
return;
}
// Decompiled Jimple (Soot IR) of ParquetFileWriter.appendColumnChunk. NOTE(review):
// the dumper collapsed all locals to "v" and all branch targets to "label", so the
// per-variable data flow below is lossy; comments describe the flow implied by the
// invoked signatures, not exact register assignments.
//
// Appends a column chunk raw-copied from another (already-written) Parquet file:
//   1) copies getTotalSize() bytes starting at chunk.getStartingPos() from the input
//      stream into `out` at the current write position (ParquetFileWriter.copy),
//   2) registers the supplied BloomFilter in currentBloomFilters keyed by the
//      column descriptor's dot-joined path,
//   3) appends the ColumnIndex / OffsetIndex to currentColumnIndexes /
//      currentOffsetIndexes for later footer serialization,
//   4) recomputes first-data-page / dictionary-page offsets relative to the new
//      file position (Offsets.getOffsets),
//   5) rebuilds ColumnChunkMetaData with the shifted offsets, adds it to
//      currentBlock, and grows currentBlock.totalByteSize by the chunk's
//      uncompressed size.
public void appendColumnChunk(org.apache.parquet.column.ColumnDescriptor, org.apache.parquet.io.SeekableInputStream, org.apache.parquet.hadoop.metadata.ColumnChunkMetaData, org.apache.parquet.column.values.bloomfilter.BloomFilter, org.apache.parquet.internal.column.columnindex.ColumnIndex, org.apache.parquet.internal.column.columnindex.OffsetIndex) throws java.io.IOException
{
// Local declarations (names lost in decompilation — all collapsed to "v").
org.apache.parquet.schema.PrimitiveType v;
org.apache.parquet.internal.column.columnindex.ColumnIndex v;
org.apache.parquet.internal.column.columnindex.OffsetIndex v;
java.util.Map v;
java.util.Set v;
org.apache.parquet.hadoop.metadata.ColumnChunkMetaData v, v;
java.util.List v, v;
org.apache.parquet.column.EncodingStats v;
org.apache.parquet.column.ColumnDescriptor v;
org.apache.parquet.hadoop.metadata.BlockMetaData v, v, v;
org.apache.parquet.hadoop.metadata.CompressionCodecName v;
org.apache.parquet.hadoop.metadata.ColumnPath v;
java.lang.String[] v;
long v, v, v, v, v, v, v, v, v, v, v;
org.apache.parquet.column.values.bloomfilter.BloomFilter v;
org.apache.parquet.io.SeekableInputStream v;
java.lang.String v;
org.apache.parquet.column.statistics.Statistics v;
org.apache.parquet.hadoop.Offsets v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.io.PositionOutputStream v, v;
// Bind `this` and the six parameters.
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: org.apache.parquet.column.ColumnDescriptor;
v := @parameter: org.apache.parquet.io.SeekableInputStream;
v := @parameter: org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
v := @parameter: org.apache.parquet.column.values.bloomfilter.BloomFilter;
v := @parameter: org.apache.parquet.internal.column.columnindex.ColumnIndex;
v := @parameter: org.apache.parquet.internal.column.columnindex.OffsetIndex;
// (1) Raw byte copy: totalSize bytes from the source chunk's starting position
// into `out` at the current output position.
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: long getStartingPos()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: long getTotalSize()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: void copy(org.apache.parquet.io.SeekableInputStream,org.apache.parquet.io.PositionOutputStream,long,long)>(v, v, v, v);
// (2) Register the bloom filter under the column's dot-joined path.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.Map currentBloomFilters>;
v = virtualinvoke v.<org.apache.parquet.column.ColumnDescriptor: java.lang.String[] getPath()>();
v = staticinvoke <java.lang.String: java.lang.String join(java.lang.CharSequence,java.lang.CharSequence[])>(".", v);
interfaceinvoke v.<java.util.Map: java.lang.Object put(java.lang.Object,java.lang.Object)>(v, v);
// (3) Record the column/offset indexes for later footer serialization.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List currentColumnIndexes>;
interfaceinvoke v.<java.util.List: boolean add(java.lang.Object)>(v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List currentOffsetIndexes>;
interfaceinvoke v.<java.util.List: boolean add(java.lang.Object)>(v);
// (4) Recompute page offsets relative to the new position in this file.
v = staticinvoke <org.apache.parquet.hadoop.Offsets: org.apache.parquet.hadoop.Offsets getOffsets(org.apache.parquet.io.SeekableInputStream,org.apache.parquet.hadoop.metadata.ColumnChunkMetaData,long)>(v, v, v);
// (5) Rebuild chunk metadata with shifted offsets and add it to the current block.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.BlockMetaData currentBlock>;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.hadoop.metadata.ColumnPath getPath()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.schema.PrimitiveType getPrimitiveType()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.hadoop.metadata.CompressionCodecName getCodec()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.column.EncodingStats getEncodingStats()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: java.util.Set getEncodings()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.column.statistics.Statistics getStatistics()>();
v = v.<org.apache.parquet.hadoop.Offsets: long firstDataPageOffset>;
v = v.<org.apache.parquet.hadoop.Offsets: long dictionaryPageOffset>;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: long getValueCount()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: long getTotalSize()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: long getTotalUncompressedSize()>();
v = staticinvoke <org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.hadoop.metadata.ColumnChunkMetaData get(org.apache.parquet.hadoop.metadata.ColumnPath,org.apache.parquet.schema.PrimitiveType,org.apache.parquet.hadoop.metadata.CompressionCodecName,org.apache.parquet.column.EncodingStats,java.util.Set,org.apache.parquet.column.statistics.Statistics,long,long,long,long,long)>(v, v, v, v, v, v, v, v, v, v, v);
virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: void addColumn(org.apache.parquet.hadoop.metadata.ColumnChunkMetaData)>(v);
// Grow the block's running totalByteSize by this chunk's uncompressed size.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.BlockMetaData currentBlock>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.BlockMetaData currentBlock>;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: long getTotalByteSize()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: long getTotalUncompressedSize()>();
v = v + v;
virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: void setTotalByteSize(long)>(v);
return;
}
// Decompiled Jimple (Soot IR) of ParquetFileWriter.copy. NOTE(review): locals are
// all collapsed to "v" and branch targets to "label" by the dumper; comments below
// reflect the loop structure implied by the goto/label skeleton and call signatures.
//
// Copies `length` bytes from `from` (starting at `start`) to `to`, chunked through
// a thread-local byte[] COPY_BUFFER. Throws IllegalArgumentException if the input
// ends before `length` bytes have been copied (read(...) returns < 0).
private static void copy(org.apache.parquet.io.SeekableInputStream, org.apache.parquet.io.PositionOutputStream, long, long) throws java.io.IOException
{
// Local declarations (names lost in decompilation).
java.lang.Object[] v;
long v, v, v, v, v;
byte v, v;
java.lang.Long v, v, v;
int v, v, v;
org.apache.parquet.io.SeekableInputStream v;
java.lang.String v;
java.lang.ThreadLocal v;
org.slf4j.Logger v;
java.lang.IllegalArgumentException v;
java.lang.Object v;
org.apache.parquet.io.PositionOutputStream v;
// Parameters: from, to, start, length (in that order, per the descriptor).
v := @parameter: org.apache.parquet.io.SeekableInputStream;
v := @parameter: org.apache.parquet.io.PositionOutputStream;
v := @parameter: long;
v := @parameter: long;
// Debug log: "Copying {length} bytes at {start} to {to.getPos()}".
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = newarray (java.lang.Object)[3];
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
v[0] = v;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
v[1] = v;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
v[2] = v;
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object[])>("Copying {} bytes at {} to {}", v);
// Seek to the chunk's starting position, then loop copying via the
// thread-local buffer until `length` bytes have been transferred.
virtualinvoke v.<org.apache.parquet.io.SeekableInputStream: void seek(long)>(v);
v = 0L; // bytes copied so far
v = <org.apache.parquet.hadoop.ParquetFileWriter: java.lang.ThreadLocal COPY_BUFFER>;
v = virtualinvoke v.<java.lang.ThreadLocal: java.lang.Object get()>();
label: // loop head: while (copied < length)
v = v cmp v;
if v >= 0 goto label;
// bytesToCopy = min(remaining, buffer.length)
v = v - v;
v = lengthof v;
v = v cmp v;
if v >= 0 goto label;
v = lengthof v;
label:
v = virtualinvoke v.<org.apache.parquet.io.SeekableInputStream: int read(byte[],int,int)>(v, 0, v);
// A negative read means EOF before `length` bytes were available.
if v >= 0 goto label;
v = new java.lang.IllegalArgumentException;
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (long,long)>(v, v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("Unexpected end of input file at \u0001\u0001");
specialinvoke v.<java.lang.IllegalArgumentException: void <init>(java.lang.String)>(v);
throw v;
label:
// Write the bytes actually read and advance the copied counter.
virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: void write(byte[],int,int)>(v, 0, v);
v = v + v;
goto label; // back to loop head
label: // loop exit
return;
}
// Decompiled Jimple (Soot IR) of ParquetFileWriter.end. NOTE(review): locals were
// collapsed to "v" by the dumper; comments describe flow implied by call signatures.
//
// Finishes the file: transitions the writer state machine to ENDED, serializes the
// column indexes, offset indexes, and bloom filters accumulated per block, builds
// the ParquetMetadata footer (schema + the caller-supplied extra key/value metadata
// map + the created-by string), writes the footer, and closes the output stream.
public void end(java.util.Map) throws java.io.IOException
{
// Local declarations (names lost in decompilation).
long v;
org.apache.parquet.crypto.InternalFileEncryptor v, v, v, v;
java.lang.Long v;
java.util.Map v;
org.slf4j.Logger v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.hadoop.ParquetFileWriter$STATE v, v;
org.apache.parquet.schema.MessageType v;
java.util.List v, v, v, v, v, v, v;
org.apache.parquet.hadoop.metadata.FileMetaData v;
org.apache.parquet.hadoop.metadata.ParquetMetadata v, v;
org.apache.parquet.io.PositionOutputStream v, v, v, v, v, v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: java.util.Map;
// State machine transition: STATE.end() validates the writer is in a state
// from which the file may be finished, then returns the new state.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state>;
v = virtualinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter$STATE: org.apache.parquet.hadoop.ParquetFileWriter$STATE end()>();
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state> = v;
// Serialize the three per-block index structures before the footer.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List columnIndexes>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List blocks>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.crypto.InternalFileEncryptor fileEncryptor>;
staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: void serializeColumnIndexes(java.util.List,java.util.List,org.apache.parquet.io.PositionOutputStream,org.apache.parquet.crypto.InternalFileEncryptor)>(v, v, v, v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List offsetIndexes>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List blocks>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.crypto.InternalFileEncryptor fileEncryptor>;
staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: void serializeOffsetIndexes(java.util.List,java.util.List,org.apache.parquet.io.PositionOutputStream,org.apache.parquet.crypto.InternalFileEncryptor)>(v, v, v, v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List bloomFilters>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List blocks>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.crypto.InternalFileEncryptor fileEncryptor>;
staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: void serializeBloomFilters(java.util.List,java.util.List,org.apache.parquet.io.PositionOutputStream,org.apache.parquet.crypto.InternalFileEncryptor)>(v, v, v, v);
// Debug log the footer position.
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object)>("{}: end", v);
// Build the footer: FileMetaData(schema, extraMetaData, createdBy) + blocks.
// The created-by string is baked in by the compiler (parquet-mr 1.12.3).
v = new org.apache.parquet.hadoop.metadata.ParquetMetadata;
v = new org.apache.parquet.hadoop.metadata.FileMetaData;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.schema.MessageType schema>;
specialinvoke v.<org.apache.parquet.hadoop.metadata.FileMetaData: void <init>(org.apache.parquet.schema.MessageType,java.util.Map,java.lang.String)>(v, v, "parquet-mr version 1.12.3 (build f8dced182c4c1fbdec6ccb3185537b5a01e6ed6b)");
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: java.util.List blocks>;
specialinvoke v.<org.apache.parquet.hadoop.metadata.ParquetMetadata: void <init>(org.apache.parquet.hadoop.metadata.FileMetaData,java.util.List)>(v, v);
v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.ParquetMetadata footer> = v;
// Write footer (plaintext or encrypted, decided inside serializeFooter) and close.
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.ParquetMetadata footer>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.crypto.InternalFileEncryptor fileEncryptor>;
staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: void serializeFooter(org.apache.parquet.hadoop.metadata.ParquetMetadata,org.apache.parquet.io.PositionOutputStream,org.apache.parquet.crypto.InternalFileEncryptor)>(v, v, v);
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: void close()>();
return;
}
// Decompiled Jimple (Soot IR) of ParquetFileWriter.serializeColumnIndexes.
// NOTE(review): locals collapsed to "v" and labels to "label" by the dumper, so
// which "goto label" matches which "label:" is inferred from the nested-loop shape.
//
// Writes every non-null ColumnIndex to the output stream, block by block, column
// by column. For encrypted columns (fileEncryptor != null and the column setup is
// encrypted) a module AAD is built (ModuleType.ColumnIndex, blockOrdinal,
// columnOrdinal, page ordinal -1) and the index is encrypted on write. After each
// write the chunk metadata gets an IndexReference(offset, length) so the footer
// can point at the serialized index.
private static void serializeColumnIndexes(java.util.List, java.util.List, org.apache.parquet.io.PositionOutputStream, org.apache.parquet.crypto.InternalFileEncryptor) throws java.io.IOException
{
// Local declarations (names lost in decompilation).
org.apache.parquet.schema.PrimitiveType v;
byte[] v, v;
java.lang.Long v;
org.apache.parquet.crypto.InternalColumnEncryptionSetup v;
org.apache.parquet.internal.hadoop.metadata.IndexReference v;
org.apache.parquet.format.BlockCipher$Encryptor v;
boolean v;
org.apache.parquet.format.ColumnIndex v;
java.util.List v, v, v;
org.apache.parquet.crypto.ModuleCipherFactory$ModuleType v;
org.apache.parquet.hadoop.metadata.ColumnPath v;
long v, v, v, v;
org.apache.parquet.crypto.InternalFileEncryptor v;
int v, v, v, v, v, v, v;
org.slf4j.Logger v;
java.lang.Object v, v, v, v;
org.apache.parquet.io.PositionOutputStream v;
// Parameters: columnIndexes (per block, per column), blocks, out, fileEncryptor.
v := @parameter: java.util.List;
v := @parameter: java.util.List;
v := @parameter: org.apache.parquet.io.PositionOutputStream;
v := @parameter: org.apache.parquet.crypto.InternalFileEncryptor;
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object)>("{}: column indexes", v);
// Outer loop over blocks.
v = 0;
v = interfaceinvoke v.<java.util.List: int size()>();
label: // outer loop head
if v >= v goto label;
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(v);
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: java.util.List getColumns()>();
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(v);
// Inner loop over this block's columns.
v = 0;
v = interfaceinvoke v.<java.util.List: int size()>();
label: // inner loop head
if v >= v goto label;
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(v);
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.schema.PrimitiveType getPrimitiveType()>();
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(v);
// Convert internal ColumnIndex to the thrift form; skip column if null.
v = staticinvoke <org.apache.parquet.format.converter.ParquetMetadataConverter: org.apache.parquet.format.ColumnIndex toParquetColumnIndex(org.apache.parquet.schema.PrimitiveType,org.apache.parquet.internal.column.columnindex.ColumnIndex)>(v, v);
if v == null goto label;
// Default: no encryptor / no AAD (plaintext write).
v = null;
v = null;
if null == v goto label; // fileEncryptor == null -> plaintext path
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.hadoop.metadata.ColumnPath getPath()>();
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: org.apache.parquet.crypto.InternalColumnEncryptionSetup getColumnSetup(org.apache.parquet.hadoop.metadata.ColumnPath,boolean,int)>(v, 0, v);
v = virtualinvoke v.<org.apache.parquet.crypto.InternalColumnEncryptionSetup: boolean isEncrypted()>();
if v == 0 goto label; // column not encrypted -> plaintext path
// Encrypted column: metadata encryptor + module AAD for this column index.
v = virtualinvoke v.<org.apache.parquet.crypto.InternalColumnEncryptionSetup: org.apache.parquet.format.BlockCipher$Encryptor getMetaDataEncryptor()>();
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: byte[] getFileAAD()>();
v = <org.apache.parquet.crypto.ModuleCipherFactory$ModuleType: org.apache.parquet.crypto.ModuleCipherFactory$ModuleType ColumnIndex>;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: int getOrdinal()>();
v = virtualinvoke v.<org.apache.parquet.crypto.InternalColumnEncryptionSetup: int getOrdinal()>();
v = (int) -1; // page ordinal: -1 (whole-index module, not a page)
v = staticinvoke <org.apache.parquet.crypto.AesCipher: byte[] createModuleAAD(byte[],org.apache.parquet.crypto.ModuleCipherFactory$ModuleType,int,int,int)>(v, v, v, v, v);
label:
// Write the index and record its (offset, length) on the chunk metadata.
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
staticinvoke <org.apache.parquet.format.Util: void writeColumnIndex(org.apache.parquet.format.ColumnIndex,java.io.OutputStream,org.apache.parquet.format.BlockCipher$Encryptor,byte[])>(v, v, v, v);
v = new org.apache.parquet.internal.hadoop.metadata.IndexReference;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v - v; // length = posAfter - posBefore
specialinvoke v.<org.apache.parquet.internal.hadoop.metadata.IndexReference: void <init>(long,int)>(v, v);
virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: void setColumnIndexReference(org.apache.parquet.internal.hadoop.metadata.IndexReference)>(v);
label: // inner loop increment
v = v + 1;
goto label;
label: // outer loop increment
v = v + 1;
goto label;
label: // done
return;
}
// Decompiled Jimple (Soot IR) of ParquetFileWriter.toIntWithCheck. NOTE(review):
// locals collapsed to "v" by the dumper.
//
// Narrows a long to int, throwing ParquetEncodingException if the value does not
// round-trip (i.e. exceeds Integer.MAX_VALUE, per the error message). The
// `v cmp v / if v == 0` sequence is the compiled form of `if ((int) value == value)`.
private int toIntWithCheck(long)
{
byte v;
org.apache.parquet.hadoop.ParquetFileWriter v;
int v;
org.apache.parquet.io.ParquetEncodingException v;
long v;
java.lang.String v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v := @parameter: long;
// Compare the narrowed value back against the original long.
v = v cmp v;
if v == 0 goto label; // equal -> safe to return the int
// Too large for an int page size: fail with a descriptive message.
v = new org.apache.parquet.io.ParquetEncodingException;
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (long)>(v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("Cannot write page larger than 2147483647 bytes: \u0001");
specialinvoke v.<org.apache.parquet.io.ParquetEncodingException: void <init>(java.lang.String)>(v);
throw v;
label:
return v;
}
// Decompiled Jimple (Soot IR) of ParquetFileWriter.serializeOffsetIndexes.
// NOTE(review): locals collapsed to "v" and labels to "label" by the dumper;
// the nested-loop structure mirrors serializeColumnIndexes.
//
// Writes every non-null OffsetIndex to the output stream, block by block, column
// by column. The per-block offset-index list may itself be null (skips the whole
// block), and individual entries may be null (skips the column). Encrypted
// columns get a module AAD (ModuleType.OffsetIndex, blockOrdinal, columnOrdinal,
// -1) and an encrypted write. Each written index is recorded on its chunk
// metadata as an IndexReference(offset, length).
private static void serializeOffsetIndexes(java.util.List, java.util.List, org.apache.parquet.io.PositionOutputStream, org.apache.parquet.crypto.InternalFileEncryptor) throws java.io.IOException
{
// Local declarations (names lost in decompilation).
byte[] v, v;
org.apache.parquet.hadoop.metadata.ColumnPath v;
long v, v, v, v;
org.apache.parquet.crypto.InternalFileEncryptor v;
java.lang.Long v;
org.apache.parquet.crypto.InternalColumnEncryptionSetup v;
int v, v, v, v, v, v, v;
org.apache.parquet.internal.hadoop.metadata.IndexReference v;
org.apache.parquet.format.BlockCipher$Encryptor v;
boolean v;
org.slf4j.Logger v;
java.util.List v, v, v;
org.apache.parquet.crypto.ModuleCipherFactory$ModuleType v;
java.lang.Object v, v, v, v;
org.apache.parquet.io.PositionOutputStream v;
org.apache.parquet.format.OffsetIndex v;
// Parameters: offsetIndexes (per block, per column), blocks, out, fileEncryptor.
v := @parameter: java.util.List;
v := @parameter: java.util.List;
v := @parameter: org.apache.parquet.io.PositionOutputStream;
v := @parameter: org.apache.parquet.crypto.InternalFileEncryptor;
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object)>("{}: offset indexes", v);
// Outer loop over blocks.
v = 0;
v = interfaceinvoke v.<java.util.List: int size()>();
label: // outer loop head
if v >= v goto label;
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(v);
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: java.util.List getColumns()>();
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(v);
// Inner loop over this block's columns.
v = 0;
v = interfaceinvoke v.<java.util.List: int size()>();
label: // inner loop head
if v >= v goto label;
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(v);
if v == null goto label; // null offset index -> skip this column
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(v);
// Default: plaintext write (no encryptor / no AAD).
v = null;
v = null;
if null == v goto label; // fileEncryptor == null -> plaintext path
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.hadoop.metadata.ColumnPath getPath()>();
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: org.apache.parquet.crypto.InternalColumnEncryptionSetup getColumnSetup(org.apache.parquet.hadoop.metadata.ColumnPath,boolean,int)>(v, 0, v);
v = virtualinvoke v.<org.apache.parquet.crypto.InternalColumnEncryptionSetup: boolean isEncrypted()>();
if v == 0 goto label; // column not encrypted -> plaintext path
// Encrypted column: metadata encryptor + module AAD for this offset index.
v = virtualinvoke v.<org.apache.parquet.crypto.InternalColumnEncryptionSetup: org.apache.parquet.format.BlockCipher$Encryptor getMetaDataEncryptor()>();
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: byte[] getFileAAD()>();
v = <org.apache.parquet.crypto.ModuleCipherFactory$ModuleType: org.apache.parquet.crypto.ModuleCipherFactory$ModuleType OffsetIndex>;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: int getOrdinal()>();
v = virtualinvoke v.<org.apache.parquet.crypto.InternalColumnEncryptionSetup: int getOrdinal()>();
v = (int) -1; // page ordinal: -1 (whole-index module)
v = staticinvoke <org.apache.parquet.crypto.AesCipher: byte[] createModuleAAD(byte[],org.apache.parquet.crypto.ModuleCipherFactory$ModuleType,int,int,int)>(v, v, v, v, v);
label:
// Convert, write, and record (offset, length) on the chunk metadata.
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <org.apache.parquet.format.converter.ParquetMetadataConverter: org.apache.parquet.format.OffsetIndex toParquetOffsetIndex(org.apache.parquet.internal.column.columnindex.OffsetIndex)>(v);
staticinvoke <org.apache.parquet.format.Util: void writeOffsetIndex(org.apache.parquet.format.OffsetIndex,java.io.OutputStream,org.apache.parquet.format.BlockCipher$Encryptor,byte[])>(v, v, v, v);
v = new org.apache.parquet.internal.hadoop.metadata.IndexReference;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v - v; // length = posAfter - posBefore
specialinvoke v.<org.apache.parquet.internal.hadoop.metadata.IndexReference: void <init>(long,int)>(v, v);
virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: void setOffsetIndexReference(org.apache.parquet.internal.hadoop.metadata.IndexReference)>(v);
label: // inner loop increment
v = v + 1;
goto label;
label: // outer loop increment
v = v + 1;
goto label;
label: // done
return;
}
// Decompiled Jimple (Soot IR) of ParquetFileWriter.serializeBloomFilters.
// NOTE(review): locals collapsed to "v" and labels to "label" by the dumper;
// loop structure mirrors the other serialize* helpers.
//
// Writes the bloom filter of each column that has one, block by block. Each
// block's entry in the first list is a Map keyed by the column's dot-string path
// (skipped entirely if empty). For each column with a filter:
//   - records the current output position as the chunk's bloomFilterOffset,
//   - writes the BloomFilterHeader (encrypted with a BloomFilterHeader module AAD
//     for encrypted columns),
//   - serializes the filter bitset to a byte[]; if a bitset AAD was created (the
//     encrypted path), encrypts the bitset before writing it.
private static void serializeBloomFilters(java.util.List, java.util.List, org.apache.parquet.io.PositionOutputStream, org.apache.parquet.crypto.InternalFileEncryptor) throws java.io.IOException
{
// Local declarations (names lost in decompilation).
byte[] v, v, v, v, v;
java.lang.Long v;
org.apache.parquet.crypto.InternalColumnEncryptionSetup v;
org.apache.parquet.format.BlockCipher$Encryptor v;
boolean v, v;
java.util.List v, v, v;
org.apache.parquet.crypto.ModuleCipherFactory$ModuleType v, v;
org.apache.parquet.hadoop.metadata.ColumnPath v, v;
java.io.ByteArrayOutputStream v;
long v, v;
org.apache.parquet.crypto.InternalFileEncryptor v;
int v, v, v, v, v, v, v, v, v;
org.apache.parquet.format.BloomFilterHeader v;
java.lang.String v;
org.slf4j.Logger v;
java.lang.Object v, v, v, v;
org.apache.parquet.io.PositionOutputStream v;
// Parameters: bloomFilters (per block: Map<dotPath, BloomFilter>), blocks, out, fileEncryptor.
v := @parameter: java.util.List;
v := @parameter: java.util.List;
v := @parameter: org.apache.parquet.io.PositionOutputStream;
v := @parameter: org.apache.parquet.crypto.InternalFileEncryptor;
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object)>("{}: bloom filters", v);
// Outer loop over blocks.
v = 0;
v = interfaceinvoke v.<java.util.List: int size()>();
label: // outer loop head
if v >= v goto label;
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(v);
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: java.util.List getColumns()>();
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(v);
// Skip blocks with no bloom filters at all.
v = interfaceinvoke v.<java.util.Map: boolean isEmpty()>();
if v != 0 goto label;
// Inner loop over this block's columns.
v = 0;
v = interfaceinvoke v.<java.util.List: int size()>();
label: // inner loop head
if v >= v goto label;
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(v);
// Look up this column's filter by dot-string path; skip if absent.
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.hadoop.metadata.ColumnPath getPath()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnPath: java.lang.String toDotString()>();
v = interfaceinvoke v.<java.util.Map: java.lang.Object get(java.lang.Object)>(v);
if v == null goto label;
// Record where this filter starts in the file.
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: void setBloomFilterOffset(long)>(v);
// Defaults: plaintext write (no encryptor, no header AAD, no bitset AAD).
v = null;
v = null;
v = null;
if null == v goto label; // fileEncryptor == null -> plaintext path
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ColumnChunkMetaData: org.apache.parquet.hadoop.metadata.ColumnPath getPath()>();
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: org.apache.parquet.crypto.InternalColumnEncryptionSetup getColumnSetup(org.apache.parquet.hadoop.metadata.ColumnPath,boolean,int)>(v, 0, v);
v = virtualinvoke v.<org.apache.parquet.crypto.InternalColumnEncryptionSetup: boolean isEncrypted()>();
if v == 0 goto label; // column not encrypted -> plaintext path
// Encrypted column: build distinct AADs for the header and for the bitset.
v = virtualinvoke v.<org.apache.parquet.crypto.InternalColumnEncryptionSetup: org.apache.parquet.format.BlockCipher$Encryptor getMetaDataEncryptor()>();
v = virtualinvoke v.<org.apache.parquet.crypto.InternalColumnEncryptionSetup: int getOrdinal()>();
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: byte[] getFileAAD()>();
v = <org.apache.parquet.crypto.ModuleCipherFactory$ModuleType: org.apache.parquet.crypto.ModuleCipherFactory$ModuleType BloomFilterHeader>;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: int getOrdinal()>();
v = (int) -1; // page ordinal: -1 (not a page module)
v = staticinvoke <org.apache.parquet.crypto.AesCipher: byte[] createModuleAAD(byte[],org.apache.parquet.crypto.ModuleCipherFactory$ModuleType,int,int,int)>(v, v, v, v, v);
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: byte[] getFileAAD()>();
v = <org.apache.parquet.crypto.ModuleCipherFactory$ModuleType: org.apache.parquet.crypto.ModuleCipherFactory$ModuleType BloomFilterBitset>;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: int getOrdinal()>();
v = (int) -1;
v = staticinvoke <org.apache.parquet.crypto.AesCipher: byte[] createModuleAAD(byte[],org.apache.parquet.crypto.ModuleCipherFactory$ModuleType,int,int,int)>(v, v, v, v, v);
label:
// Write header, then serialize the bitset (encrypting it if an AAD was built).
v = staticinvoke <org.apache.parquet.format.converter.ParquetMetadataConverter: org.apache.parquet.format.BloomFilterHeader toBloomFilterHeader(org.apache.parquet.column.values.bloomfilter.BloomFilter)>(v);
staticinvoke <org.apache.parquet.format.Util: void writeBloomFilterHeader(org.apache.parquet.format.BloomFilterHeader,java.io.OutputStream,org.apache.parquet.format.BlockCipher$Encryptor,byte[])>(v, v, v, v);
v = new java.io.ByteArrayOutputStream;
specialinvoke v.<java.io.ByteArrayOutputStream: void <init>()>();
interfaceinvoke v.<org.apache.parquet.column.values.bloomfilter.BloomFilter: void writeTo(java.io.OutputStream)>(v);
v = virtualinvoke v.<java.io.ByteArrayOutputStream: byte[] toByteArray()>();
if null == v goto label; // no bitset AAD -> write plaintext bytes
v = interfaceinvoke v.<org.apache.parquet.format.BlockCipher$Encryptor: byte[] encrypt(byte[],byte[])>(v, v);
label:
virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: void write(byte[])>(v);
label: // inner loop increment
v = v + 1;
goto label;
label: // outer loop increment
v = v + 1;
goto label;
label: // done
return;
}
// Decompiled Jimple (Soot IR) of ParquetFileWriter.serializeFooter. NOTE(review):
// locals collapsed to "v" and labels to "label" by the dumper; the three return
// paths below correspond to the three footer modes.
//
// Writes the file footer in one of three modes:
//   (a) fileEncryptor == null: plaintext thrift FileMetaData + little-endian
//       footer length + "PAR1" MAGIC.
//   (b) encryptor present but footer NOT encrypted (plaintext footer with signed
//       columns): FileMetaData gets the encryption algorithm and optional signing
//       key metadata, is serialized to a buffer, written, then a 28-byte signature
//       (12-byte nonce copied from offset 4 of the GCM ciphertext + final 16-byte
//       tag) is appended, followed by length + "PAR1" MAGIC.
//   (c) footer encrypted: FileCryptoMetaData is written first, then the encrypted
//       FileMetaData, then length + "PARE" EFMAGIC.
private static void serializeFooter(org.apache.parquet.hadoop.metadata.ParquetMetadata, org.apache.parquet.io.PositionOutputStream, org.apache.parquet.crypto.InternalFileEncryptor) throws java.io.IOException
{
// Local declarations (names lost in decompilation).
byte[] v, v, v, v, v, v, v, v, v, v, v;
org.apache.parquet.crypto.AesGcmEncryptor v;
org.apache.parquet.format.converter.ParquetMetadataConverter v;
java.lang.Integer v;
java.lang.Long v, v, v, v, v;
org.apache.parquet.format.BlockCipher$Encryptor v;
org.apache.parquet.format.EncryptionAlgorithm v;
boolean v;
java.io.ByteArrayOutputStream v;
long v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v;
org.apache.parquet.format.FileMetaData v, v;
org.apache.parquet.crypto.InternalFileEncryptor v;
int v, v;
org.apache.parquet.format.FileCryptoMetaData v;
org.slf4j.Logger v, v, v;
org.apache.parquet.hadoop.metadata.ParquetMetadata v;
org.apache.parquet.io.PositionOutputStream v;
// Parameters: footer, out, fileEncryptor.
v := @parameter: org.apache.parquet.hadoop.metadata.ParquetMetadata;
v := @parameter: org.apache.parquet.io.PositionOutputStream;
v := @parameter: org.apache.parquet.crypto.InternalFileEncryptor;
v = new org.apache.parquet.format.converter.ParquetMetadataConverter;
specialinvoke v.<org.apache.parquet.format.converter.ParquetMetadataConverter: void <init>()>();
// Branch to the encryption-aware paths when an encryptor is present.
if null != v goto label;
// --- Mode (a): no encryption. ---
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>(); // footer start
v = virtualinvoke v.<org.apache.parquet.format.converter.ParquetMetadataConverter: org.apache.parquet.format.FileMetaData toParquetMetadata(int,org.apache.parquet.hadoop.metadata.ParquetMetadata)>(1, v); // 1 = CURRENT_VERSION
staticinvoke <org.apache.parquet.format.Util: void writeFileMetaData(org.apache.parquet.format.FileMetaData,java.io.OutputStream)>(v, v);
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v - v;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object,java.lang.Object)>("{}: footer length = {}", v, v);
// Trailer: 4-byte little-endian footer length + "PAR1" magic.
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v - v;
staticinvoke <org.apache.parquet.bytes.BytesUtils: void writeIntLittleEndian(java.io.OutputStream,int)>(v, v);
v = <org.apache.parquet.hadoop.ParquetFileWriter: byte[] MAGIC>;
virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: void write(byte[])>(v);
return;
label:
// Encryptor present: build encryption-aware thrift metadata.
v = virtualinvoke v.<org.apache.parquet.format.converter.ParquetMetadataConverter: org.apache.parquet.format.FileMetaData toParquetMetadata(int,org.apache.parquet.hadoop.metadata.ParquetMetadata,org.apache.parquet.crypto.InternalFileEncryptor)>(1, v, v);
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: boolean isFooterEncrypted()>();
if v != 0 goto label; // footer itself encrypted -> mode (c)
// --- Mode (b): plaintext footer, signed. ---
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>(); // footer start
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: org.apache.parquet.format.EncryptionAlgorithm getEncryptionAlgorithm()>();
virtualinvoke v.<org.apache.parquet.format.FileMetaData: org.apache.parquet.format.FileMetaData setEncryption_algorithm(org.apache.parquet.format.EncryptionAlgorithm)>(v);
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: byte[] getFooterSigningKeyMetaData()>();
if null == v goto label; // no signing key metadata to record
virtualinvoke v.<org.apache.parquet.format.FileMetaData: org.apache.parquet.format.FileMetaData setFooter_signing_key_metadata(byte[])>(v);
label:
// Serialize footer to a buffer, GCM-encrypt it to derive the signature.
v = new java.io.ByteArrayOutputStream;
specialinvoke v.<java.io.ByteArrayOutputStream: void <init>()>();
staticinvoke <org.apache.parquet.format.Util: void writeFileMetaData(org.apache.parquet.format.FileMetaData,java.io.OutputStream)>(v, v);
v = virtualinvoke v.<java.io.ByteArrayOutputStream: byte[] toByteArray()>();
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: byte[] getFileAAD()>();
v = staticinvoke <org.apache.parquet.crypto.AesCipher: byte[] createFooterAAD(byte[])>(v);
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: org.apache.parquet.crypto.AesGcmEncryptor getSignedFooterEncryptor()>();
v = virtualinvoke v.<org.apache.parquet.crypto.AesGcmEncryptor: byte[] encrypt(byte[],byte[])>(v, v);
// Signature = 12-byte nonce (ciphertext offset 4) + final 16-byte GCM tag.
v = newarray (byte)[28];
staticinvoke <java.lang.System: void arraycopy(java.lang.Object,int,java.lang.Object,int,int)>(v, 4, v, 0, 12);
v = lengthof v;
v = v - 16;
staticinvoke <java.lang.System: void arraycopy(java.lang.Object,int,java.lang.Object,int,int)>(v, v, v, 12, 16);
// Write plaintext footer bytes, then the 28-byte signature.
virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: void write(byte[])>(v);
virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: void write(byte[])>(v);
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v - v;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object,java.lang.Object)>("{}: footer and signature length = {}", v, v);
// Trailer: length (footer + signature) + "PAR1" magic.
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v - v;
staticinvoke <org.apache.parquet.bytes.BytesUtils: void writeIntLittleEndian(java.io.OutputStream,int)>(v, v);
v = <org.apache.parquet.hadoop.ParquetFileWriter: byte[] MAGIC>;
virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: void write(byte[])>(v);
return;
label:
// --- Mode (c): encrypted footer. ---
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>(); // crypto-metadata start
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: org.apache.parquet.format.FileCryptoMetaData getFileCryptoMetaData()>();
staticinvoke <org.apache.parquet.format.Util: void writeFileCryptoMetaData(org.apache.parquet.format.FileCryptoMetaData,java.io.OutputStream)>(v, v);
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: byte[] getFileAAD()>();
v = staticinvoke <org.apache.parquet.crypto.AesCipher: byte[] createFooterAAD(byte[])>(v);
v = virtualinvoke v.<org.apache.parquet.crypto.InternalFileEncryptor: org.apache.parquet.format.BlockCipher$Encryptor getFooterEncryptor()>();
staticinvoke <org.apache.parquet.format.Util: void writeFileMetaData(org.apache.parquet.format.FileMetaData,java.io.OutputStream,org.apache.parquet.format.BlockCipher$Encryptor,byte[])>(v, v, v, v);
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = v - v; // combined length of crypto metadata + encrypted footer
v = <org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(v);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
interfaceinvoke v.<org.slf4j.Logger: void debug(java.lang.String,java.lang.Object,java.lang.Object)>("{}: crypto metadata and footer length = {}", v, v);
// Trailer: length + "PARE" magic (encrypted-footer files).
staticinvoke <org.apache.parquet.bytes.BytesUtils: void writeIntLittleEndian(java.io.OutputStream,int)>(v, v);
v = <org.apache.parquet.hadoop.ParquetFileWriter: byte[] EFMAGIC>;
virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: void write(byte[])>(v);
return;
}
public org.apache.parquet.hadoop.metadata.ParquetMetadata getFooter()
{
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.hadoop.metadata.ParquetMetadata v;
boolean v;
org.apache.parquet.hadoop.ParquetFileWriter$STATE v, v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$STATE state>;
v = <org.apache.parquet.hadoop.ParquetFileWriter$STATE: org.apache.parquet.hadoop.ParquetFileWriter$STATE ENDED>;
if v != v goto label;
v = 1;
goto label;
label:
v = 0;
label:
staticinvoke <org.apache.parquet.Preconditions: void checkState(boolean,java.lang.String)>(v, "Cannot return unfinished footer.");
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.ParquetMetadata footer>;
return v;
}
public static org.apache.parquet.hadoop.metadata.ParquetMetadata mergeMetadataFiles(java.util.List, oadd.org.apache.hadoop.conf.Configuration) throws java.io.IOException
{
java.util.List v;
org.apache.parquet.hadoop.metadata.StrictKeyValueMetadataMergeStrategy v;
oadd.org.apache.hadoop.conf.Configuration v;
org.apache.parquet.hadoop.metadata.ParquetMetadata v;
v := @parameter: java.util.List;
v := @parameter: oadd.org.apache.hadoop.conf.Configuration;
v = new org.apache.parquet.hadoop.metadata.StrictKeyValueMetadataMergeStrategy;
specialinvoke v.<org.apache.parquet.hadoop.metadata.StrictKeyValueMetadataMergeStrategy: void <init>()>();
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.ParquetMetadata mergeMetadataFiles(java.util.List,oadd.org.apache.hadoop.conf.Configuration,org.apache.parquet.hadoop.metadata.KeyValueMetadataMergeStrategy)>(v, v, v);
return v;
}
public static org.apache.parquet.hadoop.metadata.ParquetMetadata mergeMetadataFiles(java.util.List, oadd.org.apache.hadoop.conf.Configuration, org.apache.parquet.hadoop.metadata.KeyValueMetadataMergeStrategy) throws java.io.IOException
{
org.apache.parquet.format.converter.ParquetMetadataConverter$MetadataFilter v;
java.util.ArrayList v;
oadd.org.apache.hadoop.conf.Configuration v;
boolean v, v, v;
java.util.Iterator v;
org.apache.parquet.hadoop.metadata.KeyValueMetadataMergeStrategy v;
java.util.List v, v;
org.apache.parquet.hadoop.metadata.FileMetaData v, v;
java.lang.Object v;
org.apache.parquet.hadoop.metadata.ParquetMetadata v, v;
org.apache.parquet.hadoop.metadata.GlobalMetaData v;
v := @parameter: java.util.List;
v := @parameter: oadd.org.apache.hadoop.conf.Configuration;
v := @parameter: org.apache.parquet.hadoop.metadata.KeyValueMetadataMergeStrategy;
v = interfaceinvoke v.<java.util.List: boolean isEmpty()>();
if v != 0 goto label;
v = 1;
goto label;
label:
v = 0;
label:
staticinvoke <org.apache.parquet.Preconditions: void checkArgument(boolean,java.lang.String)>(v, "Cannot merge an empty list of metadata");
v = null;
v = new java.util.ArrayList;
specialinvoke v.<java.util.ArrayList: void <init>()>();
v = interfaceinvoke v.<java.util.List: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = <org.apache.parquet.format.converter.ParquetMetadataConverter: org.apache.parquet.format.converter.ParquetMetadataConverter$MetadataFilter NO_FILTER>;
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileReader: org.apache.parquet.hadoop.metadata.ParquetMetadata readFooter(oadd.org.apache.hadoop.conf.Configuration,oadd.org.apache.hadoop.fs.Path,org.apache.parquet.format.converter.ParquetMetadataConverter$MetadataFilter)>(v, v, v);
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ParquetMetadata: org.apache.parquet.hadoop.metadata.FileMetaData getFileMetaData()>();
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.GlobalMetaData mergeInto(org.apache.parquet.hadoop.metadata.FileMetaData,org.apache.parquet.hadoop.metadata.GlobalMetaData,boolean)>(v, v, 1);
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ParquetMetadata: java.util.List getBlocks()>();
interfaceinvoke v.<java.util.List: boolean addAll(java.util.Collection)>(v);
goto label;
label:
v = new org.apache.parquet.hadoop.metadata.ParquetMetadata;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.GlobalMetaData: org.apache.parquet.hadoop.metadata.FileMetaData merge(org.apache.parquet.hadoop.metadata.KeyValueMetadataMergeStrategy)>(v);
specialinvoke v.<org.apache.parquet.hadoop.metadata.ParquetMetadata: void <init>(org.apache.parquet.hadoop.metadata.FileMetaData,java.util.List)>(v, v);
return v;
}
public static void writeMergedMetadataFile(java.util.List, oadd.org.apache.hadoop.fs.Path, oadd.org.apache.hadoop.conf.Configuration) throws java.io.IOException
{
java.util.List v;
oadd.org.apache.hadoop.conf.Configuration v;
org.apache.parquet.hadoop.metadata.ParquetMetadata v;
oadd.org.apache.hadoop.fs.Path v;
oadd.org.apache.hadoop.fs.FileSystem v;
v := @parameter: java.util.List;
v := @parameter: oadd.org.apache.hadoop.fs.Path;
v := @parameter: oadd.org.apache.hadoop.conf.Configuration;
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.ParquetMetadata mergeMetadataFiles(java.util.List,oadd.org.apache.hadoop.conf.Configuration)>(v, v);
v = virtualinvoke v.<oadd.org.apache.hadoop.fs.Path: oadd.org.apache.hadoop.fs.FileSystem getFileSystem(oadd.org.apache.hadoop.conf.Configuration)>(v);
staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: void writeMetadataFile(oadd.org.apache.hadoop.fs.Path,org.apache.parquet.hadoop.metadata.ParquetMetadata,oadd.org.apache.hadoop.fs.FileSystem)>(v, v, v);
return;
}
public static void writeMetadataFile(oadd.org.apache.hadoop.conf.Configuration, oadd.org.apache.hadoop.fs.Path, java.util.List) throws java.io.IOException
{
java.util.List v;
oadd.org.apache.hadoop.conf.Configuration v;
oadd.org.apache.hadoop.fs.Path v;
org.apache.parquet.hadoop.ParquetOutputFormat$JobSummaryLevel v;
v := @parameter: oadd.org.apache.hadoop.conf.Configuration;
v := @parameter: oadd.org.apache.hadoop.fs.Path;
v := @parameter: java.util.List;
v = <org.apache.parquet.hadoop.ParquetOutputFormat$JobSummaryLevel: org.apache.parquet.hadoop.ParquetOutputFormat$JobSummaryLevel ALL>;
staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: void writeMetadataFile(oadd.org.apache.hadoop.conf.Configuration,oadd.org.apache.hadoop.fs.Path,java.util.List,org.apache.parquet.hadoop.ParquetOutputFormat$JobSummaryLevel)>(v, v, v, v);
return;
}
public static void writeMetadataFile(oadd.org.apache.hadoop.conf.Configuration, oadd.org.apache.hadoop.fs.Path, java.util.List, org.apache.parquet.hadoop.ParquetOutputFormat$JobSummaryLevel) throws java.io.IOException
{
java.util.List v, v;
oadd.org.apache.hadoop.conf.Configuration v;
org.apache.parquet.hadoop.metadata.ParquetMetadata v;
java.lang.String v;
oadd.org.apache.hadoop.fs.Path v, v;
org.apache.parquet.hadoop.ParquetOutputFormat$JobSummaryLevel v, v, v, v;
oadd.org.apache.hadoop.fs.FileSystem v;
boolean v;
v := @parameter: oadd.org.apache.hadoop.conf.Configuration;
v := @parameter: oadd.org.apache.hadoop.fs.Path;
v := @parameter: java.util.List;
v := @parameter: org.apache.parquet.hadoop.ParquetOutputFormat$JobSummaryLevel;
v = <org.apache.parquet.hadoop.ParquetOutputFormat$JobSummaryLevel: org.apache.parquet.hadoop.ParquetOutputFormat$JobSummaryLevel ALL>;
if v == v goto label;
v = <org.apache.parquet.hadoop.ParquetOutputFormat$JobSummaryLevel: org.apache.parquet.hadoop.ParquetOutputFormat$JobSummaryLevel COMMON_ONLY>;
if v != v goto label;
label:
v = 1;
goto label;
label:
v = 0;
label:
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (org.apache.parquet.hadoop.ParquetOutputFormat$JobSummaryLevel)>(v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("Unsupported level: \u0001");
staticinvoke <org.apache.parquet.Preconditions: void checkArgument(boolean,java.lang.String)>(v, v);
v = virtualinvoke v.<oadd.org.apache.hadoop.fs.Path: oadd.org.apache.hadoop.fs.FileSystem getFileSystem(oadd.org.apache.hadoop.conf.Configuration)>(v);
v = virtualinvoke v.<oadd.org.apache.hadoop.fs.Path: oadd.org.apache.hadoop.fs.Path makeQualified(oadd.org.apache.hadoop.fs.FileSystem)>(v);
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.ParquetMetadata mergeFooters(oadd.org.apache.hadoop.fs.Path,java.util.List)>(v, v);
v = <org.apache.parquet.hadoop.ParquetOutputFormat$JobSummaryLevel: org.apache.parquet.hadoop.ParquetOutputFormat$JobSummaryLevel ALL>;
if v != v goto label;
staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: void writeMetadataFile(oadd.org.apache.hadoop.fs.Path,org.apache.parquet.hadoop.metadata.ParquetMetadata,oadd.org.apache.hadoop.fs.FileSystem,java.lang.String)>(v, v, v, "_metadata");
label:
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ParquetMetadata: java.util.List getBlocks()>();
interfaceinvoke v.<java.util.List: void clear()>();
staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: void writeMetadataFile(oadd.org.apache.hadoop.fs.Path,org.apache.parquet.hadoop.metadata.ParquetMetadata,oadd.org.apache.hadoop.fs.FileSystem,java.lang.String)>(v, v, v, "_common_metadata");
return;
}
private static void writeMetadataFile(oadd.org.apache.hadoop.fs.Path, org.apache.parquet.hadoop.metadata.ParquetMetadata, oadd.org.apache.hadoop.fs.FileSystem, java.lang.String) throws java.io.IOException
{
org.apache.parquet.hadoop.metadata.ParquetMetadata v;
java.lang.String v;
oadd.org.apache.hadoop.fs.Path v, v;
oadd.org.apache.hadoop.fs.FileSystem v;
v := @parameter: oadd.org.apache.hadoop.fs.Path;
v := @parameter: org.apache.parquet.hadoop.metadata.ParquetMetadata;
v := @parameter: oadd.org.apache.hadoop.fs.FileSystem;
v := @parameter: java.lang.String;
v = new oadd.org.apache.hadoop.fs.Path;
specialinvoke v.<oadd.org.apache.hadoop.fs.Path: void <init>(oadd.org.apache.hadoop.fs.Path,java.lang.String)>(v, v);
staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: void writeMetadataFile(oadd.org.apache.hadoop.fs.Path,org.apache.parquet.hadoop.metadata.ParquetMetadata,oadd.org.apache.hadoop.fs.FileSystem)>(v, v, v);
return;
}
private static void writeMetadataFile(oadd.org.apache.hadoop.fs.Path, org.apache.parquet.hadoop.metadata.ParquetMetadata, oadd.org.apache.hadoop.fs.FileSystem) throws java.io.IOException
{
byte[] v;
oadd.org.apache.hadoop.fs.FSDataOutputStream v;
org.apache.parquet.hadoop.metadata.ParquetMetadata v;
oadd.org.apache.hadoop.fs.Path v;
oadd.org.apache.hadoop.fs.FileSystem v;
org.apache.parquet.io.PositionOutputStream v;
v := @parameter: oadd.org.apache.hadoop.fs.Path;
v := @parameter: org.apache.parquet.hadoop.metadata.ParquetMetadata;
v := @parameter: oadd.org.apache.hadoop.fs.FileSystem;
v = virtualinvoke v.<oadd.org.apache.hadoop.fs.FileSystem: oadd.org.apache.hadoop.fs.FSDataOutputStream create(oadd.org.apache.hadoop.fs.Path)>(v);
v = staticinvoke <org.apache.parquet.hadoop.util.HadoopStreams: org.apache.parquet.io.PositionOutputStream wrap(oadd.org.apache.hadoop.fs.FSDataOutputStream)>(v);
v = <org.apache.parquet.hadoop.ParquetFileWriter: byte[] MAGIC>;
virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: void write(byte[])>(v);
staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: void serializeFooter(org.apache.parquet.hadoop.metadata.ParquetMetadata,org.apache.parquet.io.PositionOutputStream,org.apache.parquet.crypto.InternalFileEncryptor)>(v, v, null);
virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: void close()>();
return;
}
static org.apache.parquet.hadoop.metadata.ParquetMetadata mergeFooters(oadd.org.apache.hadoop.fs.Path, java.util.List)
{
java.util.List v;
org.apache.parquet.hadoop.metadata.StrictKeyValueMetadataMergeStrategy v;
org.apache.parquet.hadoop.metadata.ParquetMetadata v;
oadd.org.apache.hadoop.fs.Path v;
v := @parameter: oadd.org.apache.hadoop.fs.Path;
v := @parameter: java.util.List;
v = new org.apache.parquet.hadoop.metadata.StrictKeyValueMetadataMergeStrategy;
specialinvoke v.<org.apache.parquet.hadoop.metadata.StrictKeyValueMetadataMergeStrategy: void <init>()>();
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.ParquetMetadata mergeFooters(oadd.org.apache.hadoop.fs.Path,java.util.List,org.apache.parquet.hadoop.metadata.KeyValueMetadataMergeStrategy)>(v, v, v);
return v;
}
static org.apache.parquet.hadoop.metadata.ParquetMetadata mergeFooters(oadd.org.apache.hadoop.fs.Path, java.util.List, org.apache.parquet.hadoop.metadata.KeyValueMetadataMergeStrategy)
{
java.util.ArrayList v;
int v;
java.lang.String v, v, v, v;
java.net.URI v, v;
boolean v, v, v, v;
java.util.Iterator v, v;
org.apache.parquet.hadoop.metadata.KeyValueMetadataMergeStrategy v;
java.util.List v, v;
org.apache.parquet.hadoop.metadata.FileMetaData v, v;
org.apache.parquet.io.ParquetEncodingException v;
java.lang.Object v, v;
org.apache.parquet.hadoop.metadata.ParquetMetadata v, v, v;
oadd.org.apache.hadoop.fs.Path v, v;
org.apache.parquet.hadoop.metadata.GlobalMetaData v;
v := @parameter: oadd.org.apache.hadoop.fs.Path;
v := @parameter: java.util.List;
v := @parameter: org.apache.parquet.hadoop.metadata.KeyValueMetadataMergeStrategy;
v = virtualinvoke v.<oadd.org.apache.hadoop.fs.Path: java.net.URI toUri()>();
v = virtualinvoke v.<java.net.URI: java.lang.String getPath()>();
v = null;
v = new java.util.ArrayList;
specialinvoke v.<java.util.ArrayList: void <init>()>();
v = interfaceinvoke v.<java.util.List: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.Footer: oadd.org.apache.hadoop.fs.Path getFile()>();
v = virtualinvoke v.<oadd.org.apache.hadoop.fs.Path: java.net.URI toUri()>();
v = virtualinvoke v.<java.net.URI: java.lang.String getPath()>();
v = virtualinvoke v.<java.lang.String: boolean startsWith(java.lang.String)>(v);
if v != 0 goto label;
v = new org.apache.parquet.io.ParquetEncodingException;
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String,oadd.org.apache.hadoop.fs.Path)>(v, v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("\u invalid: all the files must be contained in the root \u0001");
specialinvoke v.<org.apache.parquet.io.ParquetEncodingException: void <init>(java.lang.String)>(v);
throw v;
label:
v = virtualinvoke v.<java.lang.String: int length()>();
v = virtualinvoke v.<java.lang.String: java.lang.String substring(int)>(v);
label:
v = virtualinvoke v.<java.lang.String: boolean startsWith(java.lang.String)>("/");
if v == 0 goto label;
v = virtualinvoke v.<java.lang.String: java.lang.String substring(int)>(1);
goto label;
label:
v = virtualinvoke v.<org.apache.parquet.hadoop.Footer: org.apache.parquet.hadoop.metadata.ParquetMetadata getParquetMetadata()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ParquetMetadata: org.apache.parquet.hadoop.metadata.FileMetaData getFileMetaData()>();
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.GlobalMetaData mergeInto(org.apache.parquet.hadoop.metadata.FileMetaData,org.apache.parquet.hadoop.metadata.GlobalMetaData)>(v, v);
v = virtualinvoke v.<org.apache.parquet.hadoop.Footer: org.apache.parquet.hadoop.metadata.ParquetMetadata getParquetMetadata()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ParquetMetadata: java.util.List getBlocks()>();
v = interfaceinvoke v.<java.util.List: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
virtualinvoke v.<org.apache.parquet.hadoop.metadata.BlockMetaData: void setPath(java.lang.String)>(v);
interfaceinvoke v.<java.util.List: boolean add(java.lang.Object)>(v);
goto label;
label:
v = new org.apache.parquet.hadoop.metadata.ParquetMetadata;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.GlobalMetaData: org.apache.parquet.hadoop.metadata.FileMetaData merge(org.apache.parquet.hadoop.metadata.KeyValueMetadataMergeStrategy)>(v);
specialinvoke v.<org.apache.parquet.hadoop.metadata.ParquetMetadata: void <init>(org.apache.parquet.hadoop.metadata.FileMetaData,java.util.List)>(v, v);
return v;
}
public long getPos() throws java.io.IOException
{
long v;
org.apache.parquet.hadoop.ParquetFileWriter v;
org.apache.parquet.io.PositionOutputStream v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = virtualinvoke v.<org.apache.parquet.io.PositionOutputStream: long getPos()>();
return v;
}
public long getNextRowGroupSize() throws java.io.IOException
{
org.apache.parquet.hadoop.ParquetFileWriter v;
long v;
org.apache.parquet.hadoop.ParquetFileWriter$AlignmentStrategy v;
org.apache.parquet.io.PositionOutputStream v;
v := @this: org.apache.parquet.hadoop.ParquetFileWriter;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.ParquetFileWriter$AlignmentStrategy alignment>;
v = v.<org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.io.PositionOutputStream out>;
v = interfaceinvoke v.<org.apache.parquet.hadoop.ParquetFileWriter$AlignmentStrategy: long nextRowGroupSize(org.apache.parquet.io.PositionOutputStream)>(v);
return v;
}
static org.apache.parquet.hadoop.metadata.GlobalMetaData getGlobalMetaData(java.util.List)
{
java.util.List v;
org.apache.parquet.hadoop.metadata.GlobalMetaData v;
v := @parameter: java.util.List;
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.GlobalMetaData getGlobalMetaData(java.util.List,boolean)>(v, 1);
return v;
}
static org.apache.parquet.hadoop.metadata.GlobalMetaData getGlobalMetaData(java.util.List, boolean)
{
java.util.Iterator v;
java.util.List v;
org.apache.parquet.hadoop.metadata.FileMetaData v;
java.lang.Object v;
org.apache.parquet.hadoop.metadata.ParquetMetadata v;
org.apache.parquet.hadoop.metadata.GlobalMetaData v;
boolean v, v;
v := @parameter: java.util.List;
v := @parameter: boolean;
v = null;
v = interfaceinvoke v.<java.util.List: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.Footer: org.apache.parquet.hadoop.metadata.ParquetMetadata getParquetMetadata()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.ParquetMetadata: org.apache.parquet.hadoop.metadata.FileMetaData getFileMetaData()>();
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.GlobalMetaData mergeInto(org.apache.parquet.hadoop.metadata.FileMetaData,org.apache.parquet.hadoop.metadata.GlobalMetaData,boolean)>(v, v, v);
goto label;
label:
return v;
}
static org.apache.parquet.hadoop.metadata.GlobalMetaData mergeInto(org.apache.parquet.hadoop.metadata.FileMetaData, org.apache.parquet.hadoop.metadata.GlobalMetaData)
{
org.apache.parquet.hadoop.metadata.FileMetaData v;
org.apache.parquet.hadoop.metadata.GlobalMetaData v, v;
v := @parameter: org.apache.parquet.hadoop.metadata.FileMetaData;
v := @parameter: org.apache.parquet.hadoop.metadata.GlobalMetaData;
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.hadoop.metadata.GlobalMetaData mergeInto(org.apache.parquet.hadoop.metadata.FileMetaData,org.apache.parquet.hadoop.metadata.GlobalMetaData,boolean)>(v, v, 1);
return v;
}
static org.apache.parquet.hadoop.metadata.GlobalMetaData mergeInto(org.apache.parquet.hadoop.metadata.FileMetaData, org.apache.parquet.hadoop.metadata.GlobalMetaData, boolean)
{
java.util.HashMap v;
java.util.HashSet v;
java.util.Map v, v;
java.lang.String v;
java.util.LinkedHashSet v;
boolean v, v, v;
java.util.Iterator v;
java.util.Set v, v;
org.apache.parquet.schema.MessageType v, v, v, v;
org.apache.parquet.hadoop.metadata.FileMetaData v;
java.lang.Object v, v, v, v, v;
org.apache.parquet.hadoop.metadata.GlobalMetaData v, v;
v := @parameter: org.apache.parquet.hadoop.metadata.FileMetaData;
v := @parameter: org.apache.parquet.hadoop.metadata.GlobalMetaData;
v := @parameter: boolean;
v = null;
v = new java.util.HashMap;
specialinvoke v.<java.util.HashMap: void <init>()>();
v = new java.util.HashSet;
specialinvoke v.<java.util.HashSet: void <init>()>();
if v == null goto label;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.GlobalMetaData: org.apache.parquet.schema.MessageType getSchema()>();
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.GlobalMetaData: java.util.Map getKeyValueMetaData()>();
interfaceinvoke v.<java.util.Map: void putAll(java.util.Map)>(v);
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.GlobalMetaData: java.util.Set getCreatedBy()>();
interfaceinvoke v.<java.util.Set: boolean addAll(java.util.Collection)>(v);
label:
if v != null goto label;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.FileMetaData: org.apache.parquet.schema.MessageType getSchema()>();
if v != null goto label;
label:
if v == null goto label;
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.FileMetaData: org.apache.parquet.schema.MessageType getSchema()>();
v = virtualinvoke v.<org.apache.parquet.schema.MessageType: boolean equals(java.lang.Object)>(v);
if v != 0 goto label;
label:
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.FileMetaData: org.apache.parquet.schema.MessageType getSchema()>();
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.schema.MessageType mergeInto(org.apache.parquet.schema.MessageType,org.apache.parquet.schema.MessageType,boolean)>(v, v, v);
label:
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.FileMetaData: java.util.Map getKeyValueMetaData()>();
v = interfaceinvoke v.<java.util.Map: java.util.Set entrySet()>();
v = interfaceinvoke v.<java.util.Set: java.util.Iterator iterator()>();
label:
v = interfaceinvoke v.<java.util.Iterator: boolean hasNext()>();
if v == 0 goto label;
v = interfaceinvoke v.<java.util.Iterator: java.lang.Object next()>();
v = interfaceinvoke v.<java.util.Map$Entry: java.lang.Object getKey()>();
v = interfaceinvoke v.<java.util.Map: java.lang.Object get(java.lang.Object)>(v);
if v != null goto label;
v = new java.util.LinkedHashSet;
specialinvoke v.<java.util.LinkedHashSet: void <init>()>();
v = v;
v = interfaceinvoke v.<java.util.Map$Entry: java.lang.Object getKey()>();
interfaceinvoke v.<java.util.Map: java.lang.Object put(java.lang.Object,java.lang.Object)>(v, v);
label:
v = interfaceinvoke v.<java.util.Map$Entry: java.lang.Object getValue()>();
interfaceinvoke v.<java.util.Set: boolean add(java.lang.Object)>(v);
goto label;
label:
v = virtualinvoke v.<org.apache.parquet.hadoop.metadata.FileMetaData: java.lang.String getCreatedBy()>();
interfaceinvoke v.<java.util.Set: boolean add(java.lang.Object)>(v);
v = new org.apache.parquet.hadoop.metadata.GlobalMetaData;
specialinvoke v.<org.apache.parquet.hadoop.metadata.GlobalMetaData: void <init>(org.apache.parquet.schema.MessageType,java.util.Map,java.util.Set)>(v, v, v);
return v;
}
static org.apache.parquet.schema.MessageType mergeInto(org.apache.parquet.schema.MessageType, org.apache.parquet.schema.MessageType)
{
org.apache.parquet.schema.MessageType v, v, v;
v := @parameter: org.apache.parquet.schema.MessageType;
v := @parameter: org.apache.parquet.schema.MessageType;
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter: org.apache.parquet.schema.MessageType mergeInto(org.apache.parquet.schema.MessageType,org.apache.parquet.schema.MessageType,boolean)>(v, v, 1);
return v;
}
static org.apache.parquet.schema.MessageType mergeInto(org.apache.parquet.schema.MessageType, org.apache.parquet.schema.MessageType, boolean)
{
org.apache.parquet.schema.MessageType v, v, v;
boolean v;
v := @parameter: org.apache.parquet.schema.MessageType;
v := @parameter: org.apache.parquet.schema.MessageType;
v := @parameter: boolean;
if v != null goto label;
return v;
label:
v = virtualinvoke v.<org.apache.parquet.schema.MessageType: org.apache.parquet.schema.MessageType union(org.apache.parquet.schema.MessageType,boolean)>(v, v);
return v;
}
static void <clinit>()
{
byte[] v, v;
java.lang.ThreadLocal v;
org.slf4j.Logger v;
java.util.function.Supplier v;
java.nio.charset.Charset v, v;
java.lang.String v, v;
v = staticinvoke <org.slf4j.LoggerFactory: org.slf4j.Logger getLogger(java.lang.Class)>(class "Lorg/apache/parquet/hadoop/ParquetFileWriter;");
<org.apache.parquet.hadoop.ParquetFileWriter: org.slf4j.Logger LOG> = v;
v = "PAR1";
v = <java.nio.charset.StandardCharsets: java.nio.charset.Charset US_ASCII>;
v = virtualinvoke v.<java.lang.String: byte[] getBytes(java.nio.charset.Charset)>(v);
<org.apache.parquet.hadoop.ParquetFileWriter: byte[] MAGIC> = v;
v = "PARE";
v = <java.nio.charset.StandardCharsets: java.nio.charset.Charset US_ASCII>;
v = virtualinvoke v.<java.lang.String: byte[] getBytes(java.nio.charset.Charset)>(v);
<org.apache.parquet.hadoop.ParquetFileWriter: byte[] EFMAGIC> = v;
v = staticinvoke <org.apache.parquet.hadoop.ParquetFileWriter$lambda_static_0__679: java.util.function.Supplier bootstrap$()>();
v = staticinvoke <java.lang.ThreadLocal: java.lang.ThreadLocal withInitial(java.util.function.Supplier)>(v);
<org.apache.parquet.hadoop.ParquetFileWriter: java.lang.ThreadLocal COPY_BUFFER> = v;
return;
}
}