public class oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor extends java.lang.Object implements oadd.org.apache.hadoop.io.compress.Decompressor
{
// NOTE(review): decompiled ("oadd"-shaded/relocated) Hadoop ZStandardDecompressor,
// printed as Jimple-style IR; the decompiler collapsed all local names to "v" and
// all branch targets to "label", so comments below describe intent, not names.
private static final org.slf4j.Logger LOG;
// Native ZSTD stream handle; 0L means not yet created or already freed (see end()).
private long stream;
// Capacity used for both direct NIO buffers below.
private int directBufferSize;
// Direct buffer holding compressed input copied from the caller's byte[].
private java.nio.ByteBuffer compressedDirectBuf;
// Current read offset into compressedDirectBuf.
private int compressedDirectBufOff;
// Number of valid compressed bytes currently staged in compressedDirectBuf.
private int bytesInCompressedBuffer;
// Direct buffer that receives decompressed output from the native call.
private java.nio.ByteBuffer uncompressedDirectBuf;
// Caller-supplied input array plus offset and count of bytes not yet copied in.
private byte[] userBuf;
private int userBufOff;
private int userBufferBytesToConsume;
// End-of-frame flag; presumably updated via JNI by inflateBytesDirect — confirm.
private boolean finished;
// Bytes the native layer still holds internally; presumably JNI-written — confirm.
private int remaining;
// True once the native zstd library has loaded successfully (set in <clinit>).
private static boolean nativeZStandardLoaded;
static final boolean $assertionsDisabled;
// Reports whether the native zstd bindings loaded successfully (set in <clinit>).
public static boolean isNativeCodeLoaded()
{
boolean v;
v = <oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: boolean nativeZStandardLoaded>;
return v;
}
// Returns the buffer size recommended by the native library (ZSTD stream size).
public static int getRecommendedBufferSize()
{
int v;
v = staticinvoke <oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int getStreamSize()>();
return v;
}
// No-arg constructor: delegates to <init>(int) using the native recommended size.
// (The two distinct locals were both renamed "v" by the decompiler.)
public void <init>()
{
int v;
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
v = staticinvoke <oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int getStreamSize()>();
specialinvoke v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: void <init>(int)>(v);
return;
}
// Sizing constructor: allocates the two direct buffers at the given capacity,
// marks the uncompressed buffer as fully consumed (position == capacity, so
// remaining() == 0 until the first decompress), creates the native stream
// handle, then performs an initial reset().
public void <init>(int)
{
long v;
java.nio.ByteBuffer v, v, v;
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
int v, v, v, v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
v := @parameter: int;
specialinvoke v.<java.lang.Object: void <init>()>();
// Explicit default-initialization emitted by the decompiler.
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer compressedDirectBuf> = null;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf> = null;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: byte[] userBuf> = null;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int userBufOff> = 0;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int userBufferBytesToConsume> = 0;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int remaining> = 0;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int directBufferSize> = v;
// Allocate both direct buffers at directBufferSize.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int directBufferSize>;
v = staticinvoke <java.nio.ByteBuffer: java.nio.ByteBuffer allocateDirect(int)>(v);
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer compressedDirectBuf> = v;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int directBufferSize>;
v = staticinvoke <java.nio.ByteBuffer: java.nio.ByteBuffer allocateDirect(int)>(v);
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf> = v;
// position(capacity): the output buffer starts out "empty" (nothing to drain).
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int directBufferSize>;
virtualinvoke v.<java.nio.ByteBuffer: java.nio.ByteBuffer position(int)>(v);
// Create the native ZSTD decompression stream and initialize it.
v = staticinvoke <oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: long create()>();
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: long stream> = v;
virtualinvoke v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: void reset()>();
return;
}
// Decompressor.setInput(b, off, len): validates arguments, records the user
// buffer, eagerly stages a chunk into compressedDirectBuf, and marks the
// uncompressed output buffer as drained so the next decompress() inflates.
// Throws NullPointerException when b is null; ArrayIndexOutOfBoundsException
// when off/len are negative or the range exceeds b.length.
public void setInput(byte[], int, int)
{
byte[] v;
java.lang.ArrayIndexOutOfBoundsException v;
java.lang.NullPointerException v;
java.nio.ByteBuffer v, v;
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
int v, v, v, v, v, v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
v := @parameter: byte[];
v := @parameter: int;
v := @parameter: int;
// Null check on the input array.
if v != null goto label;
v = new java.lang.NullPointerException;
specialinvoke v.<java.lang.NullPointerException: void <init>()>();
throw v;
label:
// Bounds check: off >= 0, len >= 0 and len <= b.length - off
// (the negative branches jump INTO the throw block below).
if v < 0 goto label;
if v < 0 goto label;
v = lengthof v;
v = v - v;
if v <= v goto label;
label:
v = new java.lang.ArrayIndexOutOfBoundsException;
specialinvoke v.<java.lang.ArrayIndexOutOfBoundsException: void <init>()>();
throw v;
label:
// Record the caller's buffer and range, then stage data into the direct buffer.
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: byte[] userBuf> = v;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int userBufOff> = v;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int userBufferBytesToConsume> = v;
specialinvoke v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: void setInputFromSavedData()>();
// limit = position = capacity: output buffer shows 0 remaining until inflate.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int directBufferSize>;
virtualinvoke v.<java.nio.ByteBuffer: java.nio.ByteBuffer limit(int)>(v);
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int directBufferSize>;
virtualinvoke v.<java.nio.ByteBuffer: java.nio.ByteBuffer position(int)>(v);
return;
}
// Copies up to directBufferSize bytes from the saved user buffer into
// compressedDirectBuf, clears the finished flag when new input actually
// arrived, and advances userBufOff / shrinks userBufferBytesToConsume.
private void setInputFromSavedData()
{
byte[] v;
java.nio.ByteBuffer v, v, v;
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
int v, v, v, v, v, v, v, v, v, v, v, v, v;
boolean v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int compressedDirectBufOff> = 0;
// bytesInCompressedBuffer = min(userBufferBytesToConsume, directBufferSize).
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int userBufferBytesToConsume>;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int bytesInCompressedBuffer> = v;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int bytesInCompressedBuffer>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int directBufferSize>;
if v <= v goto label;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int directBufferSize>;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int bytesInCompressedBuffer> = v;
label:
// Rewind then bulk-copy the chunk from the user array into the direct buffer.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer compressedDirectBuf>;
virtualinvoke v.<java.nio.ByteBuffer: java.nio.ByteBuffer rewind()>();
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer compressedDirectBuf>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: byte[] userBuf>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int userBufOff>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int bytesInCompressedBuffer>;
virtualinvoke v.<java.nio.ByteBuffer: java.nio.ByteBuffer put(byte[],int,int)>(v, v, v);
// If any bytes were staged while finished was set, new input means we are
// no longer at end-of-stream, so clear the flag.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer compressedDirectBuf>;
v = virtualinvoke v.<java.nio.ByteBuffer: int position()>();
if v <= 0 goto label;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: boolean finished>;
if v == 0 goto label;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: boolean finished> = 0;
label:
// Consume the staged bytes from the saved user-buffer bookkeeping.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int userBufOff>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int bytesInCompressedBuffer>;
v = v + v;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int userBufOff> = v;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int userBufferBytesToConsume>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int bytesInCompressedBuffer>;
v = v - v;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int userBufferBytesToConsume> = v;
return;
}
// Dictionaries are unsupported by this decompressor: always throws
// UnsupportedOperationException regardless of the arguments.
public void setDictionary(byte[], int, int)
{
byte[] v;
java.lang.UnsupportedOperationException v;
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
int v, v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
v := @parameter: byte[];
v := @parameter: int;
v := @parameter: int;
v = new java.lang.UnsupportedOperationException;
specialinvoke v.<java.lang.UnsupportedOperationException: void <init>(java.lang.String)>("Dictionary support is not enabled");
throw v;
}
// Decompressor.needsInput(): false (0) while decompressed output is still
// pending in uncompressedDirectBuf; true (1) only when both the staged
// compressed buffer and the saved user buffer are exhausted. If user bytes
// remain, the staged buffer is refilled from them and false is returned.
public boolean needsInput()
{
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
int v, v, v, v, v;
java.nio.ByteBuffer v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
// Output still to drain -> caller must call decompress() first.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf>;
v = virtualinvoke v.<java.nio.ByteBuffer: int remaining()>();
if v <= 0 goto label;
return 0;
label:
// Unread staged input: bytesInCompressedBuffer - compressedDirectBufOff > 0.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int bytesInCompressedBuffer>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int compressedDirectBufOff>;
v = v - v;
if v > 0 goto label;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int userBufferBytesToConsume>;
if v > 0 goto label;
// Nothing staged and nothing saved: new input is required.
return 1;
label:
// Saved user bytes remain: restage them before reporting "no input needed".
specialinvoke v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: void setInputFromSavedData()>();
label:
return 0;
}
// Never requires a preset dictionary; always returns false (0).
public boolean needsDictionary()
{
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
return 0;
}
// True only when the end-of-frame flag is set AND no decompressed output
// remains to be drained from uncompressedDirectBuf.
public boolean finished()
{
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
int v;
java.nio.ByteBuffer v;
boolean v, v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: boolean finished>;
if v == 0 goto label;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf>;
v = virtualinvoke v.<java.nio.ByteBuffer: int remaining()>();
if v != 0 goto label;
v = 1;
goto label;
label:
v = 0;
label:
return v;
}
// Decompressor.decompress(b, off, len): after stream/argument validation,
// first drains any pending bytes from uncompressedDirectBuf; otherwise
// resets the output buffer, calls the native inflateBytesDirect over the
// staged compressed bytes, sets the output buffer's limit to the number of
// bytes produced, and copies up to len of them into b. Returns the number
// of bytes written to b.
public int decompress(byte[], int, int) throws java.io.IOException
{
byte[] v;
java.lang.NullPointerException v;
java.nio.ByteBuffer v, v, v, v, v, v;
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
int v, v, v, v, v, v, v, v, v, v, v, v, v;
boolean v;
java.lang.ArrayIndexOutOfBoundsException v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
v := @parameter: byte[];
v := @parameter: int;
v := @parameter: int;
// Fail fast if the native stream handle was never created or already freed.
specialinvoke v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: void checkStream()>();
if v != null goto label;
v = new java.lang.NullPointerException;
specialinvoke v.<java.lang.NullPointerException: void <init>()>();
throw v;
label:
// Same off/len bounds check pattern as setInput (negative branches throw).
if v < 0 goto label;
if v < 0 goto label;
v = lengthof v;
v = v - v;
if v <= v goto label;
label:
v = new java.lang.ArrayIndexOutOfBoundsException;
specialinvoke v.<java.lang.ArrayIndexOutOfBoundsException: void <init>()>();
throw v;
label:
// Pending output from an earlier inflate: just copy it out, no native call.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf>;
v = virtualinvoke v.<java.nio.ByteBuffer: int remaining()>();
if v <= 0 goto label;
v = specialinvoke v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int populateUncompressedBuffer(byte[],int,int,int)>(v, v, v, v);
return v;
label:
// Prepare the full output buffer for the native inflate.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf>;
virtualinvoke v.<java.nio.ByteBuffer: java.nio.ByteBuffer rewind()>();
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int directBufferSize>;
virtualinvoke v.<java.nio.ByteBuffer: java.nio.ByteBuffer limit(int)>(v);
// Native call: (src, srcOff, srcLen, dst, dstOff=0, dstLen=capacity);
// the return value is the number of decompressed bytes produced.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer compressedDirectBuf>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int compressedDirectBufOff>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int bytesInCompressedBuffer>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int directBufferSize>;
v = specialinvoke v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int inflateBytesDirect(java.nio.ByteBuffer,int,int,java.nio.ByteBuffer,int,int)>(v, v, v, v, 0, v);
// If the native layer still has buffered data, we are not at end-of-stream.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int remaining>;
if v <= 0 goto label;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: boolean finished>;
if v == 0 goto label;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: boolean finished> = 0;
label:
// Limit the output buffer to the bytes actually produced, then copy out.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf>;
virtualinvoke v.<java.nio.ByteBuffer: java.nio.ByteBuffer limit(int)>(v);
v = specialinvoke v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int populateUncompressedBuffer(byte[],int,int,int)>(v, v, v, v);
return v;
}
// Returns unconsumed input: saved user bytes plus bytes the native layer
// reports as still pending (userBufferBytesToConsume + remaining).
public int getRemaining()
{
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
int v, v, v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
specialinvoke v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: void checkStream()>();
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int userBufferBytesToConsume>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int remaining>;
v = v + v;
return v;
}
// Reinitializes the native stream in place and clears all Java-side state:
// counters to zero, flags cleared, and the uncompressed buffer marked fully
// consumed (limit == position == capacity). The native handle is kept.
public void reset()
{
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
int v, v;
long v;
java.nio.ByteBuffer v, v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
specialinvoke v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: void checkStream()>();
// Reinitialize the existing native ZSTD stream.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: long stream>;
staticinvoke <oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: void init(long)>(v);
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int remaining> = 0;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: boolean finished> = 0;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int compressedDirectBufOff> = 0;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int bytesInCompressedBuffer> = 0;
// Mark the output buffer drained so finished()/needsInput() see 0 remaining.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int directBufferSize>;
virtualinvoke v.<java.nio.ByteBuffer: java.nio.ByteBuffer limit(int)>(v);
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf>;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int directBufferSize>;
virtualinvoke v.<java.nio.ByteBuffer: java.nio.ByteBuffer position(int)>(v);
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int userBufOff> = 0;
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int userBufferBytesToConsume> = 0;
return;
}
// Releases the native stream: frees it if the handle is non-zero and zeroes
// the handle so double-free is impossible. Idempotent.
public void end()
{
byte v;
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
long v, v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: long stream>;
v = v cmp 0L;
if v == 0 goto label;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: long stream>;
staticinvoke <oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: void free(long)>(v);
v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: long stream> = 0L;
label:
return;
}
// Finalizer delegates to reset().
// NOTE(review): this calls reset(), not end(), so the native handle is NOT
// freed here — confirm against the upstream Hadoop source whether this is
// intentional (reset() will throw if the stream was already ended).
protected void finalize()
{
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
virtualinvoke v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: void reset()>();
return;
}
// Guard used by public methods: throws NullPointerException("Stream not
// initialized") when the native handle is 0 (never created, or freed by end()).
private void checkStream()
{
byte v;
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
long v;
java.lang.NullPointerException v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: long stream>;
v = v cmp 0L;
if v != 0 goto label;
v = new java.lang.NullPointerException;
specialinvoke v.<java.lang.NullPointerException: void <init>(java.lang.String)>("Stream not initialized");
throw v;
label:
return;
}
// Copies n = min(len, available) decompressed bytes from uncompressedDirectBuf
// into the caller's array at the given offset and returns n. The 4th int
// parameter is the count of bytes available to drain (callers pass
// uncompressedDirectBuf.remaining() or the native inflate's return value).
private int populateUncompressedBuffer(byte[], int, int, int)
{
byte[] v;
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
int v, v, v, v;
java.nio.ByteBuffer v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
v := @parameter: byte[];
v := @parameter: int;
v := @parameter: int;
v := @parameter: int;
v = staticinvoke <java.lang.Math: int min(int,int)>(v, v);
// Bulk get advances the buffer position by n.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: java.nio.ByteBuffer uncompressedDirectBuf>;
virtualinvoke v.<java.nio.ByteBuffer: java.nio.ByteBuffer get(byte[],int,int)>(v, v, v);
return v;
}
// JNI bindings into the native zstd glue library:
// initIDs()   — caches JNI field/method IDs at class load.
// create()    — allocates a native decompression stream, returns its handle.
// init(long)  — (re)initializes the stream identified by the handle.
// inflateBytesDirect(src, srcOff, srcLen, dst, dstOff, dstLen)
//             — decompresses between the two direct buffers, returns bytes produced.
// free(long)  — releases the native stream.
// getStreamSize() — recommended buffer size for streaming decompression.
private static native void initIDs();
private static native long create();
private static native void init(long);
private native int inflateBytesDirect(java.nio.ByteBuffer, int, int, java.nio.ByteBuffer, int, int);
private static native void free(long);
private static native int getStreamSize();
// Direct-buffer decompression path used by the nested ZStandardDirectDecompressor:
// inflates src (position..limit) into dst (position..limit) via the native call,
// advances dst's position by the bytes produced, repositions src according to how
// much input was consumed, and returns the number of bytes produced.
int inflateDirect(java.nio.ByteBuffer, java.nio.ByteBuffer) throws java.io.IOException
{
java.lang.AssertionError v;
java.nio.ByteBuffer v, v;
oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor v;
int v, v, v, v, v, v, v, v, v, v;
boolean v, v;
v := @this: oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
v := @parameter: java.nio.ByteBuffer;
v := @parameter: java.nio.ByteBuffer;
// assert this instanceof ZStandardDirectDecompressor (skipped when -da).
v = <oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: boolean $assertionsDisabled>;
if v != 0 goto label;
v = v instanceof oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor$ZStandardDirectDecompressor;
if v != 0 goto label;
v = new java.lang.AssertionError;
specialinvoke v.<java.lang.AssertionError: void <init>()>();
throw v;
label:
// Snapshot positions/limits of both buffers for the native call.
v = virtualinvoke v.<java.nio.ByteBuffer: int position()>();
v = virtualinvoke v.<java.nio.ByteBuffer: int position()>();
v = virtualinvoke v.<java.nio.ByteBuffer: int limit()>();
v = virtualinvoke v.<java.nio.ByteBuffer: int position()>();
v = virtualinvoke v.<java.nio.ByteBuffer: int limit()>();
v = specialinvoke v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int inflateBytesDirect(java.nio.ByteBuffer,int,int,java.nio.ByteBuffer,int,int)>(v, v, v, v, v, v);
// Advance dst position by the number of bytes the native call produced.
v = v + v;
virtualinvoke v.<java.nio.ByteBuffer: java.nio.ByteBuffer position(int)>(v);
// Reposition src: partial consumption -> compressedDirectBufOff; otherwise
// fully consumed -> position == limit.
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int bytesInCompressedBuffer>;
if v <= 0 goto label;
v = v.<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: int compressedDirectBufOff>;
virtualinvoke v.<java.nio.ByteBuffer: java.nio.ByteBuffer position(int)>(v);
goto label;
label:
v = virtualinvoke v.<java.nio.ByteBuffer: int limit()>();
virtualinvoke v.<java.nio.ByteBuffer: java.nio.ByteBuffer position(int)>(v);
label:
return v;
}
// Static initializer: computes $assertionsDisabled, creates the SLF4J logger,
// and — when the Hadoop native code loader reports success — calls initIDs()
// and marks nativeZStandardLoaded = true, logging a warning on any Throwable.
// NOTE(review): the decompiler collapsed all branch targets to "label", so the
// exact jump structure (e.g. which label the guard below skips to) is ambiguous
// in this dump; in the original, a failed native-loader check skips initIDs().
static void <clinit>()
{
java.lang.Throwable v;
org.slf4j.Logger v, v;
java.lang.Class v;
java.lang.String v;
boolean v, v, v;
// $assertionsDisabled = !ZStandardDecompressor.class.desiredAssertionStatus().
v = class "Loadd/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor;";
v = virtualinvoke v.<java.lang.Class: boolean desiredAssertionStatus()>();
if v != 0 goto label;
v = 1;
goto label;
label:
v = 0;
label:
<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: boolean $assertionsDisabled> = v;
v = staticinvoke <org.slf4j.LoggerFactory: org.slf4j.Logger getLogger(java.lang.Class)>(class "Loadd/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor;");
<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: org.slf4j.Logger LOG> = v;
<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: boolean nativeZStandardLoaded> = 0;
// Attempt native initialization only if Hadoop's native loader succeeded.
v = staticinvoke <oadd.org.apache.hadoop.util.NativeCodeLoader: boolean isNativeCodeLoaded()>();
if v == 0 goto label;
label:
// Protected region (see catch clause below): initIDs() may throw
// UnsatisfiedLinkError or similar if the zstd glue library is absent.
staticinvoke <oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: void initIDs()>();
<oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: boolean nativeZStandardLoaded> = 1;
label:
goto label;
label:
// Handler: log a warning with the Throwable appended (string-concat indy).
v := @caughtexception;
v = <oadd.org.apache.hadoop.io.compress.zstd.ZStandardDecompressor: org.slf4j.Logger LOG>;
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.Throwable)>(v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("Error loading zstandard native libraries: \u0001");
interfaceinvoke v.<org.slf4j.Logger: void warn(java.lang.String)>(v);
label:
return;
catch java.lang.Throwable from label to label with label;
}
}